; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
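
; These tests cover the ordered indexed segment store intrinsics
; (vsoxseg<N>) operating on target("riscv.vector.tuple") values with i8
; segment elements, exercising index EEWs of 8/16/32/64 bits in both
; unmasked and masked forms. The trailing "i64 3" operand passed to each
; intrinsic is assumed to encode log2(SEW), i.e. SEW=8, matching the e8
; vsetvli in the expected output.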

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i8>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)

define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i16>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)

define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i32>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)

define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i64>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)

define void @test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i8>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)

define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i16>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)

define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i32>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)

define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i64>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)

define void @test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i8>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)

define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i16>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)

define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i32>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)

define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i64>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)

define void @test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i8>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)

define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i16>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)

define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i32>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)

define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i64>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)

define void @test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 16 x i8>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i64, i64)

define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 16 x i16>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i64, i64)

define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 16 x i32>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i64, i64)

define void @test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 32 x i8>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 32 x i8>, <vscale x 32 x i1>, i64, i64)

define void @test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i8> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 32 x i16>, i64, i64)
declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 32 x i16>, <vscale x 32 x i1>, i64, i64)

define void @test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i16> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i8>, i64, i64)
declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)

define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg3ei8.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg3ei8.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i16>, i64, i64)
declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)

define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i32>, i64, i64)
declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)

define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i64>, i64, i64)
declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)

define void @test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 2 x i8>, i64, i64)
declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)

define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg3ei8.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg3ei8.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 2 x i16>, i64, i64)
declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)

define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 2 x i32>, i64, i64)
declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)

define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 2 x i64>, i64, i64)
declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)

define void @test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i8>, i64, i64)
declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)

define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg3ei8.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg3ei8.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i16>, i64, i64)
declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)

define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i32>, i64, i64)
declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)

define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i64>, i64, i64)
declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)

define void @test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i8>, i64, i64)
declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)

define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxseg3ei8.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxseg3ei8.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i16>, i64, i64)
declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)

define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 3)
  ret void
}

define void @test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i32>, i64, i64)
declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)

define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 3) 891 ret void 892} 893 894define void @test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 895; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32: 896; CHECK: # %bb.0: # %entry 897; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 898; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t 899; CHECK-NEXT: ret 900entry: 901 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 902 ret void 903} 904 905declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i64>, i64, i64) 906declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64) 907 908define void @test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 909; CHECK-LABEL: test_vsoxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: 910; CHECK: # %bb.0: # %entry 911; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 912; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 913; CHECK-NEXT: ret 914entry: 915 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 3) 916 ret void 917} 918 919define void @test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 920; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i64: 921; CHECK: # %bb.0: # %entry 922; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 923; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t 924; CHECK-NEXT: ret 925entry: 926 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 927 ret void 928} 929 930declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 16 x i8>, i64, i64) 931declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i64, i64) 932 933define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) { 934; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: 935; CHECK: # %bb.0: # %entry 936; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 937; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 938; CHECK-NEXT: ret 939entry: 940 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i8> %index, i64 
%vl, i64 3) 941 ret void 942} 943 944define void @test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) { 945; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8: 946; CHECK: # %bb.0: # %entry 947; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 948; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t 949; CHECK-NEXT: ret 950entry: 951 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 3) 952 ret void 953} 954 955declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 16 x i16>, i64, i64) 956declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i64, i64) 957 958define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) { 959; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: 960; CHECK: # %bb.0: # %entry 961; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 962; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16 963; CHECK-NEXT: ret 964entry: 965 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, i64 3) 966 ret void 967} 968 969define void @test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) { 970; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16: 971; CHECK: # %bb.0: # %entry 972; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 973; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16, v0.t 974; CHECK-NEXT: ret 975entry: 976 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 3) 977 ret void 978} 979 980declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 16 x i32>, i64, i64) 981declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i64, i64) 982 983define void @test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) { 984; CHECK-LABEL: test_vsoxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: 985; CHECK: # %bb.0: # %entry 986; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 987; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 988; CHECK-NEXT: ret 989entry: 990 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, i64 3) 991 ret void 992} 993 994define void 
@test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) { 995; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32: 996; CHECK: # %bb.0: # %entry 997; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 998; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t 999; CHECK-NEXT: ret 1000entry: 1001 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 3) 1002 ret void 1003} 1004 1005declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, <vscale x 1 x i8>, i64, i64) 1006declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 1007 1008define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 1009; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: 1010; CHECK: # %bb.0: # %entry 1011; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1012; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 1013; CHECK-NEXT: ret 1014entry: 1015 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 3) 1016 ret void 1017} 1018 1019define void @test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 1020; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: 1021; CHECK: # %bb.0: # %entry 1022; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1023; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 1024; CHECK-NEXT: ret 1025entry: 1026 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 1027 ret void 1028} 1029 1030declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, <vscale x 1 x i16>, i64, i64) 1031declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 1032 1033define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 1034; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: 1035; CHECK: # %bb.0: # %entry 1036; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1037; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 1038; CHECK-NEXT: ret 1039entry: 1040 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 3) 1041 ret void 1042} 1043 1044define void @test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr 
%base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 1045; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16: 1046; CHECK: # %bb.0: # %entry 1047; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1048; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 1049; CHECK-NEXT: ret 1050entry: 1051 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 1052 ret void 1053} 1054 1055declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, <vscale x 1 x i32>, i64, i64) 1056declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 1057 1058define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 1059; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: 1060; CHECK: # %bb.0: # %entry 1061; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1062; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 1063; CHECK-NEXT: ret 1064entry: 1065 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 3) 1066 ret void 1067} 1068 1069define void @test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 1070; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32: 1071; CHECK: # %bb.0: # %entry 1072; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1073; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 1074; CHECK-NEXT: ret 1075entry: 1076 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 1077 ret void 1078} 1079 1080declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, <vscale x 1 x i64>, i64, i64) 1081declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 1082 1083define void @test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 1084; CHECK-LABEL: test_vsoxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: 1085; CHECK: # %bb.0: # %entry 1086; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1087; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 1088; CHECK-NEXT: ret 1089entry: 1090 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 3) 1091 ret void 1092} 1093 1094define void @test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 1095; CHECK-LABEL: 
test_vsoxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i64: 1096; CHECK: # %bb.0: # %entry 1097; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1098; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 1099; CHECK-NEXT: ret 1100entry: 1101 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 1102 ret void 1103} 1104 1105declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 2 x i8>, i64, i64) 1106declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 1107 1108define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 1109; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: 1110; CHECK: # %bb.0: # %entry 1111; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1112; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 1113; CHECK-NEXT: ret 1114entry: 1115 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 3) 1116 ret void 1117} 1118 1119define void @test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 1120; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8: 1121; CHECK: # %bb.0: # %entry 1122; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1123; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 1124; CHECK-NEXT: ret 1125entry: 1126 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 1127 ret void 1128} 1129 1130declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 2 x i16>, i64, i64) 1131declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 1132 1133define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 1134; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: 1135; CHECK: # %bb.0: # %entry 1136; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1137; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 1138; CHECK-NEXT: ret 1139entry: 1140 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 3) 1141 ret void 1142} 1143 1144define void @test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 1145; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16: 1146; CHECK: # %bb.0: # %entry 1147; CHECK-NEXT: vsetvli zero, a1, 
e8, mf4, ta, ma 1148; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 1149; CHECK-NEXT: ret 1150entry: 1151 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 1152 ret void 1153} 1154 1155declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 2 x i32>, i64, i64) 1156declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 1157 1158define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 1159; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: 1160; CHECK: # %bb.0: # %entry 1161; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1162; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 1163; CHECK-NEXT: ret 1164entry: 1165 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 3) 1166 ret void 1167} 1168 1169define void @test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 1170; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32: 1171; CHECK: # %bb.0: # %entry 1172; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1173; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 1174; CHECK-NEXT: ret 1175entry: 1176 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 1177 ret void 1178} 1179 1180declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 2 x i64>, i64, i64) 1181declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 1182 1183define void @test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 1184; CHECK-LABEL: test_vsoxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: 1185; CHECK: # %bb.0: # %entry 1186; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1187; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 1188; CHECK-NEXT: ret 1189entry: 1190 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 3) 1191 ret void 1192} 1193 1194define void @test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 1195; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i64: 1196; CHECK: # %bb.0: # %entry 1197; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1198; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 1199; CHECK-NEXT: ret 1200entry: 1201 tail call 
void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 1202 ret void 1203} 1204 1205declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i8>, i64, i64) 1206declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 1207 1208define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 1209; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: 1210; CHECK: # %bb.0: # %entry 1211; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1212; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 1213; CHECK-NEXT: ret 1214entry: 1215 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 3) 1216 ret void 1217} 1218 1219define void @test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 1220; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8: 1221; CHECK: # %bb.0: # %entry 1222; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1223; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 1224; CHECK-NEXT: ret 1225entry: 1226 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 1227 ret void 1228} 1229 1230declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i16>, i64, i64) 1231declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 1232 1233define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 1234; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: 1235; CHECK: # %bb.0: # %entry 1236; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1237; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 1238; CHECK-NEXT: ret 1239entry: 1240 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 3) 1241 ret void 1242} 1243 1244define void @test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 1245; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16: 1246; CHECK: # %bb.0: # %entry 1247; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1248; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 1249; CHECK-NEXT: ret 1250entry: 1251 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, 
ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 1252 ret void 1253} 1254 1255declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i32>, i64, i64) 1256declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 1257 1258define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 1259; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: 1260; CHECK: # %bb.0: # %entry 1261; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1262; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 1263; CHECK-NEXT: ret 1264entry: 1265 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 3) 1266 ret void 1267} 1268 1269define void @test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 1270; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32: 1271; CHECK: # %bb.0: # %entry 1272; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1273; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 1274; CHECK-NEXT: ret 1275entry: 1276 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 1277 ret void 1278} 1279 1280declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i64>, i64, i64) 1281declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 1282 1283define void @test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 1284; CHECK-LABEL: test_vsoxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: 1285; CHECK: # %bb.0: # %entry 1286; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1287; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 1288; CHECK-NEXT: ret 1289entry: 1290 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 3) 1291 ret void 1292} 1293 1294define void @test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 1295; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i64: 1296; CHECK: # %bb.0: # %entry 1297; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1298; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 1299; CHECK-NEXT: ret 1300entry: 1301 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 1302 ret void 1303} 1304 1305declare 
void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 8 x i8>, i64, i64) 1306declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64) 1307 1308define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) { 1309; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: 1310; CHECK: # %bb.0: # %entry 1311; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1312; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 1313; CHECK-NEXT: ret 1314entry: 1315 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 3) 1316 ret void 1317} 1318 1319define void @test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) { 1320; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8: 1321; CHECK: # %bb.0: # %entry 1322; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1323; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 1324; CHECK-NEXT: ret 1325entry: 1326 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 1327 ret void 1328} 1329 1330declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 8 x i16>, i64, i64) 1331declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64) 1332 1333define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) { 1334; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: 1335; CHECK: # %bb.0: # %entry 1336; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1337; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 1338; CHECK-NEXT: ret 1339entry: 1340 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 3) 1341 ret void 1342} 1343 1344define void @test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) { 1345; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16: 1346; CHECK: # %bb.0: # %entry 1347; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1348; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 1349; CHECK-NEXT: ret 1350entry: 1351 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 1352 ret void 1353} 1354 1355declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 8 x 
i32>, i64, i64) 1356declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64) 1357 1358define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) { 1359; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: 1360; CHECK: # %bb.0: # %entry 1361; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1362; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 1363; CHECK-NEXT: ret 1364entry: 1365 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 3) 1366 ret void 1367} 1368 1369define void @test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 1370; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32: 1371; CHECK: # %bb.0: # %entry 1372; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1373; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 1374; CHECK-NEXT: ret 1375entry: 1376 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 1377 ret void 1378} 1379 1380declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 8 x i64>, i64, i64) 1381declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64) 1382 1383define void @test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 1384; CHECK-LABEL: test_vsoxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: 1385; CHECK: # %bb.0: # %entry 1386; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1387; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 1388; CHECK-NEXT: ret 1389entry: 1390 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 3) 1391 ret void 1392} 1393 1394define void @test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 1395; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i64: 1396; CHECK: # %bb.0: # %entry 1397; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1398; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t 1399; CHECK-NEXT: ret 1400entry: 1401 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 1402 ret void 1403} 1404 1405declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 16 x i8>, i64, i64) 1406declare void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i64, i64) 1407 1408define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) { 1409; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: 1410; CHECK: # %bb.0: # %entry 1411; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 1412; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 1413; CHECK-NEXT: ret 1414entry: 1415 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, i64 3) 1416 ret void 1417} 1418 1419define void @test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) { 1420; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8: 1421; CHECK: # %bb.0: # %entry 1422; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 1423; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t 1424; CHECK-NEXT: ret 1425entry: 1426 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 3) 1427 ret void 1428} 1429 1430declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 16 x i16>, i64, i64) 1431declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i64, i64) 1432 1433define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) { 1434; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: 1435; CHECK: # %bb.0: # %entry 1436; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 1437; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 1438; CHECK-NEXT: ret 1439entry: 1440 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, i64 3) 1441 ret void 1442} 1443 1444define void @test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) { 1445; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16: 1446; CHECK: # %bb.0: # %entry 1447; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 1448; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t 1449; CHECK-NEXT: ret 1450entry: 1451 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 3) 1452 ret void 1453} 1454 1455declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 16 x i32>, i64, i64) 1456declare void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i64, i64) 1457 1458define void @test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) { 1459; CHECK-LABEL: test_vsoxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: 1460; CHECK: # %bb.0: # %entry 1461; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 1462; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 1463; CHECK-NEXT: ret 1464entry: 1465 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, i64 3) 1466 ret void 1467} 1468 1469define void @test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) { 1470; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32: 1471; CHECK: # %bb.0: # %entry 1472; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 1473; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t 1474; CHECK-NEXT: ret 1475entry: 1476 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 3) 1477 ret void 1478} 1479 1480declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i8>, i64, i64) 1481declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 1482 1483define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 1484; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: 1485; CHECK: # %bb.0: # %entry 1486; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1487; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 1488; CHECK-NEXT: ret 1489entry: 1490 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 3) 1491 ret void 1492} 1493 1494define void @test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 1495; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: 1496; CHECK: # %bb.0: # %entry 1497; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1498; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 1499; CHECK-NEXT: ret 1500entry: 1501 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 1502 ret void 1503} 1504 1505declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i16>, i64, i64) 1506declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", 
<vscale x 1 x i8>, 5), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 1507 1508define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 1509; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: 1510; CHECK: # %bb.0: # %entry 1511; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1512; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 1513; CHECK-NEXT: ret 1514entry: 1515 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 3) 1516 ret void 1517} 1518 1519define void @test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 1520; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16: 1521; CHECK: # %bb.0: # %entry 1522; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1523; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 1524; CHECK-NEXT: ret 1525entry: 1526 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 1527 ret void 1528} 1529 1530declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i32>, i64, i64) 1531declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 1532 1533define void @test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 1534; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: 1535; CHECK: # %bb.0: # %entry 1536; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1537; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 1538; CHECK-NEXT: ret 1539entry: 1540 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 3) 1541 ret void 1542} 1543 1544define void @test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 1545; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32: 1546; CHECK: # %bb.0: # %entry 1547; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1548; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t 1549; CHECK-NEXT: ret 1550entry: 1551 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 1552 ret void 1553} 1554 1555declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i64>, i64, i64) 1556declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 1557 1558define void 
@test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 1559; CHECK-LABEL: test_vsoxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: 1560; CHECK: # %bb.0: # %entry 1561; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1562; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13 1563; CHECK-NEXT: ret 1564entry: 1565 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 3) 1566 ret void 1567} 1568 1569define void @test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 1570; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i64: 1571; CHECK: # %bb.0: # %entry 1572; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1573; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13, v0.t 1574; CHECK-NEXT: ret 1575entry: 1576 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 1577 ret void 1578} 1579 1580declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 2 x i8>, i64, i64) 1581declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 1582 1583define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 1584; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: 1585; CHECK: # %bb.0: # %entry 1586; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1587; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 1588; CHECK-NEXT: ret 1589entry: 1590 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 3) 1591 ret void 1592} 1593 1594define void @test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 1595; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8: 1596; CHECK: # %bb.0: # %entry 1597; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1598; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 1599; CHECK-NEXT: ret 1600entry: 1601 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 1602 ret void 1603} 1604 1605declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 2 x i16>, i64, i64) 1606declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 1607 1608define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, 
<vscale x 2 x i16> %index, i64 %vl) { 1609; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: 1610; CHECK: # %bb.0: # %entry 1611; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1612; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 1613; CHECK-NEXT: ret 1614entry: 1615 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 3) 1616 ret void 1617} 1618 1619define void @test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 1620; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16: 1621; CHECK: # %bb.0: # %entry 1622; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1623; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 1624; CHECK-NEXT: ret 1625entry: 1626 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 1627 ret void 1628} 1629 1630declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 2 x i32>, i64, i64) 1631declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 1632 1633define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 1634; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: 1635; CHECK: # %bb.0: # %entry 1636; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1637; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 1638; CHECK-NEXT: ret 1639entry: 1640 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 3) 1641 ret void 1642} 1643 1644define void @test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 1645; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32: 1646; CHECK: # %bb.0: # %entry 1647; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1648; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t 1649; CHECK-NEXT: ret 1650entry: 1651 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 1652 ret void 1653} 1654 1655declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 2 x i64>, i64, i64) 1656declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 1657 1658define void @test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 1659; CHECK-LABEL: test_vsoxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: 
1660; CHECK: # %bb.0: # %entry 1661; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1662; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14 1663; CHECK-NEXT: ret 1664entry: 1665 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 3) 1666 ret void 1667} 1668 1669define void @test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 1670; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i64: 1671; CHECK: # %bb.0: # %entry 1672; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1673; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14, v0.t 1674; CHECK-NEXT: ret 1675entry: 1676 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 1677 ret void 1678} 1679 1680declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i8>, i64, i64) 1681declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 1682 1683define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 1684; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: 1685; CHECK: # %bb.0: # %entry 1686; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1687; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 1688; CHECK-NEXT: ret 1689entry: 1690 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 3) 1691 ret void 1692} 1693 1694define void @test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 1695; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8: 1696; CHECK: # %bb.0: # %entry 1697; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1698; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 1699; CHECK-NEXT: ret 1700entry: 1701 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 1702 ret void 1703} 1704 1705declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i16>, i64, i64) 1706declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 1707 1708define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 1709; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: 1710; CHECK: # %bb.0: # %entry 1711; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1712; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 
1713; CHECK-NEXT: ret 1714entry: 1715 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 3) 1716 ret void 1717} 1718 1719define void @test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 1720; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16: 1721; CHECK: # %bb.0: # %entry 1722; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1723; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 1724; CHECK-NEXT: ret 1725entry: 1726 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 1727 ret void 1728} 1729 1730declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i32>, i64, i64) 1731declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 1732 1733define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 1734; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: 1735; CHECK: # %bb.0: # %entry 1736; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1737; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14 1738; CHECK-NEXT: ret 1739entry: 1740 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 3) 1741 ret void 1742} 1743 1744define void @test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 1745; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32: 1746; CHECK: # %bb.0: # %entry 1747; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1748; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14, v0.t 1749; CHECK-NEXT: ret 1750entry: 1751 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 1752 ret void 1753} 1754 1755declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i64>, i64, i64) 1756declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 1757 1758define void @test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 1759; CHECK-LABEL: test_vsoxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: 1760; CHECK: # %bb.0: # %entry 1761; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1762; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 1763; CHECK-NEXT: ret 1764entry: 1765 tail call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 3) 1766 ret void 1767} 1768 1769define void @test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 1770; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i64: 1771; CHECK: # %bb.0: # %entry 1772; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1773; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t 1774; CHECK-NEXT: ret 1775entry: 1776 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 1777 ret void 1778} 1779 1780declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i8>, i64, i64) 1781declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64) 1782 1783define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) { 1784; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: 1785; CHECK: # %bb.0: # %entry 1786; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1787; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 1788; CHECK-NEXT: ret 1789entry: 1790 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 3) 1791 ret void 1792} 1793 1794define void @test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) { 1795; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8: 1796; CHECK: # %bb.0: # %entry 1797; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1798; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 1799; CHECK-NEXT: ret 1800entry: 1801 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 1802 ret void 1803} 1804 1805declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i16>, i64, i64) 1806declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64) 1807 1808define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) { 1809; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: 1810; CHECK: # %bb.0: # %entry 1811; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1812; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v14 1813; CHECK-NEXT: ret 1814entry: 1815 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 
8 x i16> %index, i64 %vl, i64 3) 1816 ret void 1817} 1818 1819define void @test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) { 1820; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16: 1821; CHECK: # %bb.0: # %entry 1822; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1823; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v14, v0.t 1824; CHECK-NEXT: ret 1825entry: 1826 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 1827 ret void 1828} 1829 1830declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i32>, i64, i64) 1831declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64) 1832 1833define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) { 1834; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: 1835; CHECK: # %bb.0: # %entry 1836; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1837; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16 1838; CHECK-NEXT: ret 1839entry: 1840 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 3) 1841 ret void 1842} 1843 1844define void @test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 1845; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32: 1846; CHECK: # %bb.0: # %entry 1847; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1848; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16, v0.t 1849; CHECK-NEXT: ret 1850entry: 1851 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 1852 ret void 1853} 1854 1855declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i64>, i64, i64) 1856declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64) 1857 1858define void @test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 1859; CHECK-LABEL: test_vsoxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: 1860; CHECK: # %bb.0: # %entry 1861; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1862; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 1863; CHECK-NEXT: ret 1864entry: 1865 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 3) 1866 ret void 1867} 1868 1869define void 
@test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 1870; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i64: 1871; CHECK: # %bb.0: # %entry 1872; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1873; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t 1874; CHECK-NEXT: ret 1875entry: 1876 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 1877 ret void 1878} 1879 1880declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, <vscale x 1 x i8>, i64, i64) 1881declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 1882 1883define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 1884; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: 1885; CHECK: # %bb.0: # %entry 1886; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1887; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 1888; CHECK-NEXT: ret 1889entry: 1890 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 3) 1891 ret void 1892} 1893 1894define void @test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 1895; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: 1896; CHECK: # %bb.0: # %entry 1897; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1898; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 1899; CHECK-NEXT: ret 1900entry: 1901 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 1902 ret void 1903} 1904 1905declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, <vscale x 1 x i16>, i64, i64) 1906declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 1907 1908define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 1909; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: 1910; CHECK: # %bb.0: # %entry 1911; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1912; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 1913; CHECK-NEXT: ret 1914entry: 1915 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 3) 1916 ret void 1917} 1918 1919define void @test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, 
<vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 1920; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16: 1921; CHECK: # %bb.0: # %entry 1922; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1923; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 1924; CHECK-NEXT: ret 1925entry: 1926 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 1927 ret void 1928} 1929 1930declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, <vscale x 1 x i32>, i64, i64) 1931declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 1932 1933define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 1934; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: 1935; CHECK: # %bb.0: # %entry 1936; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1937; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 1938; CHECK-NEXT: ret 1939entry: 1940 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 3) 1941 ret void 1942} 1943 1944define void @test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 1945; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32: 1946; CHECK: # %bb.0: # %entry 1947; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1948; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 1949; CHECK-NEXT: ret 1950entry: 1951 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 1952 ret void 1953} 1954 1955declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, <vscale x 1 x i64>, i64, i64) 1956declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 1957 1958define void @test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 1959; CHECK-LABEL: test_vsoxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: 1960; CHECK: # %bb.0: # %entry 1961; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1962; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 1963; CHECK-NEXT: ret 1964entry: 1965 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 3) 1966 ret void 1967} 1968 1969define void @test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 1970; CHECK-LABEL: 
test_vsoxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i64: 1971; CHECK: # %bb.0: # %entry 1972; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1973; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t 1974; CHECK-NEXT: ret 1975entry: 1976 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 1977 ret void 1978} 1979 1980declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 2 x i8>, i64, i64) 1981declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 1982 1983define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 1984; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: 1985; CHECK: # %bb.0: # %entry 1986; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1987; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 1988; CHECK-NEXT: ret 1989entry: 1990 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 3) 1991 ret void 1992} 1993 1994define void @test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 1995; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8: 1996; CHECK: # %bb.0: # %entry 1997; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1998; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 1999; CHECK-NEXT: ret 2000entry: 2001 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 2002 ret void 2003} 2004 2005declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 2 x i16>, i64, i64) 2006declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 2007 2008define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 2009; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: 2010; CHECK: # %bb.0: # %entry 2011; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2012; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 2013; CHECK-NEXT: ret 2014entry: 2015 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 3) 2016 ret void 2017} 2018 2019define void @test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 2020; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16: 2021; CHECK: # %bb.0: # %entry 2022; CHECK-NEXT: vsetvli zero, a1, 
e8, mf4, ta, ma 2023; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 2024; CHECK-NEXT: ret 2025entry: 2026 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 2027 ret void 2028} 2029 2030declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 2 x i32>, i64, i64) 2031declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 2032 2033define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 2034; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: 2035; CHECK: # %bb.0: # %entry 2036; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2037; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 2038; CHECK-NEXT: ret 2039entry: 2040 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 3) 2041 ret void 2042} 2043 2044define void @test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 2045; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32: 2046; CHECK: # %bb.0: # %entry 2047; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2048; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 2049; CHECK-NEXT: ret 2050entry: 2051 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 2052 ret void 2053} 2054 2055declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 2 x i64>, i64, i64) 2056declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 2057 2058define void @test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 2059; CHECK-LABEL: test_vsoxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: 2060; CHECK: # %bb.0: # %entry 2061; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2062; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 2063; CHECK-NEXT: ret 2064entry: 2065 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 3) 2066 ret void 2067} 2068 2069define void @test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 2070; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i64: 2071; CHECK: # %bb.0: # %entry 2072; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2073; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t 2074; CHECK-NEXT: ret 2075entry: 2076 tail call 
void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 2077 ret void 2078} 2079 2080declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i8>, i64, i64) 2081declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 2082 2083define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 2084; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: 2085; CHECK: # %bb.0: # %entry 2086; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2087; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 2088; CHECK-NEXT: ret 2089entry: 2090 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 3) 2091 ret void 2092} 2093 2094define void @test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 2095; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8: 2096; CHECK: # %bb.0: # %entry 2097; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2098; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 2099; CHECK-NEXT: ret 2100entry: 2101 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 2102 ret void 2103} 2104 2105declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i16>, i64, i64) 2106declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 2107 2108define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 2109; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: 2110; CHECK: # %bb.0: # %entry 2111; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2112; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 2113; CHECK-NEXT: ret 2114entry: 2115 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 3) 2116 ret void 2117} 2118 2119define void @test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 2120; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16: 2121; CHECK: # %bb.0: # %entry 2122; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2123; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 2124; CHECK-NEXT: ret 2125entry: 2126 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, 
ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 2127 ret void 2128} 2129 2130declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i32>, i64, i64) 2131declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 2132 2133define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 2134; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: 2135; CHECK: # %bb.0: # %entry 2136; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2137; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 2138; CHECK-NEXT: ret 2139entry: 2140 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 3) 2141 ret void 2142} 2143 2144define void @test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 2145; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32: 2146; CHECK: # %bb.0: # %entry 2147; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2148; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 2149; CHECK-NEXT: ret 2150entry: 2151 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 2152 ret void 2153} 2154 2155declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i64>, i64, i64) 2156declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 2157 2158define void @test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 2159; CHECK-LABEL: test_vsoxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: 2160; CHECK: # %bb.0: # %entry 2161; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2162; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16 2163; CHECK-NEXT: ret 2164entry: 2165 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 3) 2166 ret void 2167} 2168 2169define void @test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 2170; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i64: 2171; CHECK: # %bb.0: # %entry 2172; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2173; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16, v0.t 2174; CHECK-NEXT: ret 2175entry: 2176 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 2177 ret void 2178} 2179 2180declare 
void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 8 x i8>, i64, i64) 2181declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64) 2182 2183define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) { 2184; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: 2185; CHECK: # %bb.0: # %entry 2186; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2187; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 2188; CHECK-NEXT: ret 2189entry: 2190 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 3) 2191 ret void 2192} 2193 2194define void @test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) { 2195; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8: 2196; CHECK: # %bb.0: # %entry 2197; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2198; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 2199; CHECK-NEXT: ret 2200entry: 2201 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 2202 ret void 2203} 2204 2205declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 8 x i16>, i64, i64) 2206declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64) 2207 2208define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) { 2209; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: 2210; CHECK: # %bb.0: # %entry 2211; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2212; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 2213; CHECK-NEXT: ret 2214entry: 2215 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 3) 2216 ret void 2217} 2218 2219define void @test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) { 2220; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16: 2221; CHECK: # %bb.0: # %entry 2222; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2223; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 2224; CHECK-NEXT: ret 2225entry: 2226 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 2227 ret void 2228} 2229 2230declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 8 x 
i32>, i64, i64) 2231declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64) 2232 2233define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) { 2234; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: 2235; CHECK: # %bb.0: # %entry 2236; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2237; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v16 2238; CHECK-NEXT: ret 2239entry: 2240 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 3) 2241 ret void 2242} 2243 2244define void @test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 2245; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32: 2246; CHECK: # %bb.0: # %entry 2247; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2248; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v16, v0.t 2249; CHECK-NEXT: ret 2250entry: 2251 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 2252 ret void 2253} 2254 2255declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 8 x i64>, i64, i64) 2256declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64) 2257 2258define void @test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 2259; CHECK-LABEL: test_vsoxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: 2260; CHECK: # %bb.0: # %entry 2261; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2262; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16 2263; CHECK-NEXT: ret 2264entry: 2265 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 3) 2266 ret void 2267} 2268 2269define void @test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 2270; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i64: 2271; CHECK: # %bb.0: # %entry 2272; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2273; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16, v0.t 2274; CHECK-NEXT: ret 2275entry: 2276 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 2277 ret void 2278} 2279 2280declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, <vscale x 1 x i8>, i64, i64) 2281declare void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 2282 2283define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 2284; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: 2285; CHECK: # %bb.0: # %entry 2286; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2287; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 2288; CHECK-NEXT: ret 2289entry: 2290 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 3) 2291 ret void 2292} 2293 2294define void @test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 2295; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: 2296; CHECK: # %bb.0: # %entry 2297; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2298; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 2299; CHECK-NEXT: ret 2300entry: 2301 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 2302 ret void 2303} 2304 2305declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, <vscale x 1 x i16>, i64, i64) 2306declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 2307 2308define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 2309; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: 2310; CHECK: # %bb.0: # %entry 2311; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2312; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 2313; CHECK-NEXT: ret 2314entry: 2315 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 3) 2316 ret void 2317} 2318 2319define void @test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 2320; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16: 2321; CHECK: # %bb.0: # %entry 2322; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2323; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t 2324; CHECK-NEXT: ret 2325entry: 2326 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 2327 ret void 2328} 2329 2330declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, <vscale x 1 x i32>, i64, i64) 2331declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, 
<vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 2332 2333define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 2334; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: 2335; CHECK: # %bb.0: # %entry 2336; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2337; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 2338; CHECK-NEXT: ret 2339entry: 2340 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 3) 2341 ret void 2342} 2343 2344define void @test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 2345; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32: 2346; CHECK: # %bb.0: # %entry 2347; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2348; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t 2349; CHECK-NEXT: ret 2350entry: 2351 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 2352 ret void 2353} 2354 2355declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, <vscale x 1 x i64>, i64, i64) 2356declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 2357 2358define void @test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 2359; CHECK-LABEL: test_vsoxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: 2360; CHECK: # %bb.0: # %entry 2361; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2362; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15 2363; CHECK-NEXT: ret 2364entry: 2365 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 3) 2366 ret void 2367} 2368 2369define void @test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 2370; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i64: 2371; CHECK: # %bb.0: # %entry 2372; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2373; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15, v0.t 2374; CHECK-NEXT: ret 2375entry: 2376 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 2377 ret void 2378} 2379 2380declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i8>, i64, i64) 2381declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 2382 2383define void 
@test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 2384; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: 2385; CHECK: # %bb.0: # %entry 2386; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2387; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 2388; CHECK-NEXT: ret 2389entry: 2390 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 3) 2391 ret void 2392} 2393 2394define void @test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 2395; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8: 2396; CHECK: # %bb.0: # %entry 2397; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2398; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 2399; CHECK-NEXT: ret 2400entry: 2401 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 2402 ret void 2403} 2404 2405declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i16>, i64, i64) 2406declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 2407 2408define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 2409; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: 2410; CHECK: # %bb.0: # %entry 2411; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2412; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 2413; CHECK-NEXT: ret 2414entry: 2415 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 3) 2416 ret void 2417} 2418 2419define void @test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 2420; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16: 2421; CHECK: # %bb.0: # %entry 2422; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2423; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t 2424; CHECK-NEXT: ret 2425entry: 2426 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 2427 ret void 2428} 2429 2430declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i32>, i64, i64) 2431declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 2432 2433define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, 
<vscale x 2 x i32> %index, i64 %vl) { 2434; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: 2435; CHECK: # %bb.0: # %entry 2436; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2437; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 2438; CHECK-NEXT: ret 2439entry: 2440 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 3) 2441 ret void 2442} 2443 2444define void @test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 2445; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32: 2446; CHECK: # %bb.0: # %entry 2447; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2448; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t 2449; CHECK-NEXT: ret 2450entry: 2451 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 2452 ret void 2453} 2454 2455declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i64>, i64, i64) 2456declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 2457 2458define void @test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 2459; CHECK-LABEL: test_vsoxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: 2460; CHECK: # %bb.0: # %entry 2461; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2462; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 2463; CHECK-NEXT: ret 2464entry: 2465 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 3) 2466 ret void 2467} 2468 2469define void @test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 2470; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i64: 2471; CHECK: # %bb.0: # %entry 2472; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2473; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t 2474; CHECK-NEXT: ret 2475entry: 2476 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 2477 ret void 2478} 2479 2480declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i8>, i64, i64) 2481declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 2482 2483define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 2484; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: 2485; 
CHECK: # %bb.0: # %entry 2486; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2487; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 2488; CHECK-NEXT: ret 2489entry: 2490 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 3) 2491 ret void 2492} 2493 2494define void @test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 2495; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8: 2496; CHECK: # %bb.0: # %entry 2497; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2498; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 2499; CHECK-NEXT: ret 2500entry: 2501 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 2502 ret void 2503} 2504 2505declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i16>, i64, i64) 2506declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 2507 2508define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 2509; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: 2510; CHECK: # %bb.0: # %entry 2511; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2512; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 2513; CHECK-NEXT: ret 2514entry: 2515 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 3) 2516 ret void 2517} 2518 2519define void @test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 2520; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16: 2521; CHECK: # %bb.0: # %entry 2522; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2523; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t 2524; CHECK-NEXT: ret 2525entry: 2526 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 2527 ret void 2528} 2529 2530declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i32>, i64, i64) 2531declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 2532 2533define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 2534; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: 2535; CHECK: # %bb.0: # %entry 2536; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2537; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16 
2538; CHECK-NEXT: ret 2539entry: 2540 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 3) 2541 ret void 2542} 2543 2544define void @test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 2545; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32: 2546; CHECK: # %bb.0: # %entry 2547; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2548; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16, v0.t 2549; CHECK-NEXT: ret 2550entry: 2551 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 2552 ret void 2553} 2554 2555declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i64>, i64, i64) 2556declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 2557 2558define void @test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 2559; CHECK-LABEL: test_vsoxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: 2560; CHECK: # %bb.0: # %entry 2561; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2562; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 2563; CHECK-NEXT: ret 2564entry: 2565 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 3) 2566 ret void 2567} 2568 2569define void @test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 2570; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i64: 2571; CHECK: # %bb.0: # %entry 2572; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2573; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t 2574; CHECK-NEXT: ret 2575entry: 2576 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 2577 ret void 2578} 2579 2580declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 8 x i8>, i64, i64) 2581declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64) 2582 2583define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) { 2584; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: 2585; CHECK: # %bb.0: # %entry 2586; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2587; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 2588; CHECK-NEXT: ret 2589entry: 2590 tail call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 3) 2591 ret void 2592} 2593 2594define void @test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) { 2595; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8: 2596; CHECK: # %bb.0: # %entry 2597; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2598; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 2599; CHECK-NEXT: ret 2600entry: 2601 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 2602 ret void 2603} 2604 2605declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 8 x i16>, i64, i64) 2606declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64) 2607 2608define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) { 2609; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: 2610; CHECK: # %bb.0: # %entry 2611; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2612; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v16 2613; CHECK-NEXT: ret 2614entry: 2615 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 3) 2616 ret void 2617} 2618 2619define void @test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) { 2620; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16: 2621; CHECK: # %bb.0: # %entry 2622; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2623; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v16, v0.t 2624; CHECK-NEXT: ret 2625entry: 2626 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 2627 ret void 2628} 2629 2630declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 8 x i32>, i64, i64) 2631declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64) 2632 2633define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) { 2634; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: 2635; CHECK: # %bb.0: # %entry 2636; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2637; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16 2638; CHECK-NEXT: ret 2639entry: 2640 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, 
<vscale x 8 x i32> %index, i64 %vl, i64 3) 2641 ret void 2642} 2643 2644define void @test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 2645; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32: 2646; CHECK: # %bb.0: # %entry 2647; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2648; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16, v0.t 2649; CHECK-NEXT: ret 2650entry: 2651 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 2652 ret void 2653} 2654 2655declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 8 x i64>, i64, i64) 2656declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64) 2657 2658define void @test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 2659; CHECK-LABEL: test_vsoxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: 2660; CHECK: # %bb.0: # %entry 2661; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2662; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 2663; CHECK-NEXT: ret 2664entry: 2665 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 3) 2666 ret void 2667} 2668 2669define void @test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 2670; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i64: 2671; CHECK: # %bb.0: # %entry 2672; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2673; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t 2674; CHECK-NEXT: ret 2675entry: 2676 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 2677 ret void 2678} 2679 2680declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i8>, i64, i64) 2681declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 2682 2683define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 2684; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: 2685; CHECK: # %bb.0: # %entry 2686; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2687; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 2688; CHECK-NEXT: ret 2689entry: 2690 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 3) 2691 ret void 2692} 2693 2694define void 
@test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 2695; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: 2696; CHECK: # %bb.0: # %entry 2697; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2698; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 2699; CHECK-NEXT: ret 2700entry: 2701 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 2702 ret void 2703} 2704 2705declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i16>, i64, i64) 2706declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 2707 2708define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 2709; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: 2710; CHECK: # %bb.0: # %entry 2711; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2712; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 2713; CHECK-NEXT: ret 2714entry: 2715 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 3) 2716 ret void 2717} 2718 2719define void @test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 2720; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16: 2721; CHECK: # %bb.0: # %entry 2722; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2723; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 2724; CHECK-NEXT: ret 2725entry: 2726 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 2727 ret void 2728} 2729 2730declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i32>, i64, i64) 2731declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 2732 2733define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 2734; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: 2735; CHECK: # %bb.0: # %entry 2736; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2737; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 2738; CHECK-NEXT: ret 2739entry: 2740 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 3) 2741 ret void 2742} 2743 2744define void @test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, 
ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 2745; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32: 2746; CHECK: # %bb.0: # %entry 2747; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2748; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 2749; CHECK-NEXT: ret 2750entry: 2751 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 2752 ret void 2753} 2754 2755declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i64>, i64, i64) 2756declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 2757 2758define void @test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 2759; CHECK-LABEL: test_vsoxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: 2760; CHECK: # %bb.0: # %entry 2761; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2762; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 2763; CHECK-NEXT: ret 2764entry: 2765 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 3) 2766 ret void 2767} 2768 2769define void @test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 2770; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i64: 2771; CHECK: # %bb.0: # %entry 2772; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 2773; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 2774; CHECK-NEXT: ret 2775entry: 2776 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 3) 2777 ret void 2778} 2779 2780declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i8>, i64, i64) 2781declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 2782 2783define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 2784; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: 2785; CHECK: # %bb.0: # %entry 2786; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2787; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 2788; CHECK-NEXT: ret 2789entry: 2790 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 3) 2791 ret void 2792} 2793 2794define void @test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 2795; CHECK-LABEL: 
test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8: 2796; CHECK: # %bb.0: # %entry 2797; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2798; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 2799; CHECK-NEXT: ret 2800entry: 2801 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 2802 ret void 2803} 2804 2805declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i16>, i64, i64) 2806declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 2807 2808define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 2809; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: 2810; CHECK: # %bb.0: # %entry 2811; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2812; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 2813; CHECK-NEXT: ret 2814entry: 2815 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 3) 2816 ret void 2817} 2818 2819define void @test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 2820; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16: 2821; CHECK: # %bb.0: # %entry 2822; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2823; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 2824; CHECK-NEXT: ret 2825entry: 2826 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 2827 ret void 2828} 2829 2830declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i32>, i64, i64) 2831declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 2832 2833define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 2834; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: 2835; CHECK: # %bb.0: # %entry 2836; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2837; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 2838; CHECK-NEXT: ret 2839entry: 2840 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 3) 2841 ret void 2842} 2843 2844define void @test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 2845; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32: 2846; CHECK: # %bb.0: # %entry 2847; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, ma 2848; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 2849; CHECK-NEXT: ret 2850entry: 2851 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 2852 ret void 2853} 2854 2855declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i64>, i64, i64) 2856declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 2857 2858define void @test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 2859; CHECK-LABEL: test_vsoxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: 2860; CHECK: # %bb.0: # %entry 2861; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2862; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 2863; CHECK-NEXT: ret 2864entry: 2865 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 3) 2866 ret void 2867} 2868 2869define void @test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 2870; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i64: 2871; CHECK: # %bb.0: # %entry 2872; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 2873; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 2874; CHECK-NEXT: ret 2875entry: 2876 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 3) 2877 ret void 2878} 2879 2880declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i8>, i64, i64) 2881declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 2882 2883define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 2884; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: 2885; CHECK: # %bb.0: # %entry 2886; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2887; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 2888; CHECK-NEXT: ret 2889entry: 2890 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 3) 2891 ret void 2892} 2893 2894define void @test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 2895; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8: 2896; CHECK: # %bb.0: # %entry 2897; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2898; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 2899; CHECK-NEXT: ret 2900entry: 2901 tail call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 2902 ret void 2903} 2904 2905declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i16>, i64, i64) 2906declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 2907 2908define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 2909; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: 2910; CHECK: # %bb.0: # %entry 2911; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2912; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 2913; CHECK-NEXT: ret 2914entry: 2915 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 3) 2916 ret void 2917} 2918 2919define void @test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 2920; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16: 2921; CHECK: # %bb.0: # %entry 2922; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2923; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 2924; CHECK-NEXT: ret 2925entry: 2926 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 2927 ret void 2928} 2929 2930declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i32>, i64, i64) 2931declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 2932 2933define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 2934; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: 2935; CHECK: # %bb.0: # %entry 2936; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2937; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 2938; CHECK-NEXT: ret 2939entry: 2940 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 3) 2941 ret void 2942} 2943 2944define void @test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 2945; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32: 2946; CHECK: # %bb.0: # %entry 2947; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2948; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 2949; CHECK-NEXT: ret 2950entry: 2951 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 
8) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 2952 ret void 2953} 2954 2955declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i64>, i64, i64) 2956declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 2957 2958define void @test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 2959; CHECK-LABEL: test_vsoxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: 2960; CHECK: # %bb.0: # %entry 2961; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2962; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 2963; CHECK-NEXT: ret 2964entry: 2965 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 3) 2966 ret void 2967} 2968 2969define void @test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 2970; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i64: 2971; CHECK: # %bb.0: # %entry 2972; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 2973; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 2974; CHECK-NEXT: ret 2975entry: 2976 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 3) 2977 ret void 2978} 2979 2980declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 8 x i8>, i64, i64) 2981declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64) 2982 2983define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) { 2984; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: 2985; CHECK: # %bb.0: # %entry 2986; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2987; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 2988; CHECK-NEXT: ret 2989entry: 2990 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 3) 2991 ret void 2992} 2993 2994define void @test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) { 2995; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8: 2996; CHECK: # %bb.0: # %entry 2997; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 2998; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 2999; CHECK-NEXT: ret 3000entry: 3001 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 3002 ret void 3003} 3004 3005declare void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 8 x i16>, i64, i64) 3006declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64) 3007 3008define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) { 3009; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: 3010; CHECK: # %bb.0: # %entry 3011; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 3012; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 3013; CHECK-NEXT: ret 3014entry: 3015 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 3) 3016 ret void 3017} 3018 3019define void @test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) { 3020; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16: 3021; CHECK: # %bb.0: # %entry 3022; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 3023; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 3024; CHECK-NEXT: ret 3025entry: 3026 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 3027 ret void 3028} 3029 3030declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 8 x i32>, i64, i64) 3031declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64) 3032 3033define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) { 3034; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: 3035; CHECK: # %bb.0: # %entry 3036; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 3037; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 3038; CHECK-NEXT: ret 3039entry: 3040 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 3) 3041 ret void 3042} 3043 3044define void @test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 3045; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32: 3046; CHECK: # %bb.0: # %entry 3047; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 3048; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 3049; CHECK-NEXT: ret 3050entry: 3051 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 3052 ret void 3053} 3054 3055declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, 
<vscale x 8 x i64>, i64, i64) 3056declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64) 3057 3058define void @test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 3059; CHECK-LABEL: test_vsoxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: 3060; CHECK: # %bb.0: # %entry 3061; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 3062; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 3063; CHECK-NEXT: ret 3064entry: 3065 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 3) 3066 ret void 3067} 3068 3069define void @test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 3070; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i64: 3071; CHECK: # %bb.0: # %entry 3072; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 3073; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 3074; CHECK-NEXT: ret 3075entry: 3076 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 3) 3077 ret void 3078} 3079 3080declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 1 x i8>, i64, i64) 3081declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 3082 3083define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 3084; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: 3085; CHECK: # %bb.0: # %entry 3086; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3087; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 3088; CHECK-NEXT: ret 3089entry: 3090 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 3091 ret void 3092} 3093 3094define void @test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 3095; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: 3096; CHECK: # %bb.0: # %entry 3097; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3098; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t 3099; CHECK-NEXT: ret 3100entry: 3101 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 3102 ret void 3103} 3104 3105declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 1 x i16>, i64, i64) 3106declare void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 3107 3108define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 3109; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: 3110; CHECK: # %bb.0: # %entry 3111; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3112; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 3113; CHECK-NEXT: ret 3114entry: 3115 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 3116 ret void 3117} 3118 3119define void @test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 3120; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: 3121; CHECK: # %bb.0: # %entry 3122; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3123; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t 3124; CHECK-NEXT: ret 3125entry: 3126 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 3127 ret void 3128} 3129 3130declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 1 x i32>, i64, i64) 3131declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 3132 3133define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 3134; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: 3135; CHECK: # %bb.0: # %entry 3136; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3137; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 3138; CHECK-NEXT: ret 3139entry: 3140 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 3141 ret void 3142} 3143 3144define void @test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 3145; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: 3146; CHECK: # %bb.0: # %entry 3147; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3148; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t 3149; CHECK-NEXT: ret 3150entry: 3151 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 3152 ret void 3153} 3154 3155declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 1 x i64>, i64, i64) 3156declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", 
<vscale x 2 x i8>, 2), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 3157 3158define void @test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 3159; CHECK-LABEL: test_vsoxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: 3160; CHECK: # %bb.0: # %entry 3161; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3162; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 3163; CHECK-NEXT: ret 3164entry: 3165 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 3166 ret void 3167} 3168 3169define void @test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 3170; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: 3171; CHECK: # %bb.0: # %entry 3172; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3173; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t 3174; CHECK-NEXT: ret 3175entry: 3176 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 3177 ret void 3178} 3179 3180declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i8>, i64, i64) 3181declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 3182 3183define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 3184; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: 3185; CHECK: # %bb.0: # %entry 3186; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3187; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 3188; CHECK-NEXT: ret 3189entry: 3190 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 3191 ret void 3192} 3193 3194define void @test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 3195; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: 3196; CHECK: # %bb.0: # %entry 3197; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3198; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t 3199; CHECK-NEXT: ret 3200entry: 3201 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 3202 ret void 3203} 3204 3205declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i16>, i64, i64) 3206declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 3207 3208define void 
@test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 3209; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: 3210; CHECK: # %bb.0: # %entry 3211; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3212; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 3213; CHECK-NEXT: ret 3214entry: 3215 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 3216 ret void 3217} 3218 3219define void @test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 3220; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: 3221; CHECK: # %bb.0: # %entry 3222; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3223; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t 3224; CHECK-NEXT: ret 3225entry: 3226 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 3227 ret void 3228} 3229 3230declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i32>, i64, i64) 3231declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 3232 3233define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 3234; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: 3235; CHECK: # %bb.0: # %entry 3236; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3237; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 3238; CHECK-NEXT: ret 3239entry: 3240 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 3241 ret void 3242} 3243 3244define void @test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 3245; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: 3246; CHECK: # %bb.0: # %entry 3247; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3248; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t 3249; CHECK-NEXT: ret 3250entry: 3251 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 3252 ret void 3253} 3254 3255declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i64>, i64, i64) 3256declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 3257 3258define void @test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x 
i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 3259; CHECK-LABEL: test_vsoxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: 3260; CHECK: # %bb.0: # %entry 3261; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3262; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 3263; CHECK-NEXT: ret 3264entry: 3265 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 3266 ret void 3267} 3268 3269define void @test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 3270; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: 3271; CHECK: # %bb.0: # %entry 3272; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3273; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t 3274; CHECK-NEXT: ret 3275entry: 3276 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 3277 ret void 3278} 3279 3280declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i8>, i64, i64) 3281declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 3282 3283define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 3284; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: 3285; CHECK: # %bb.0: # %entry 3286; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 3287; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 3288; CHECK-NEXT: ret 3289entry: 3290 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 3291 ret void 3292} 3293 3294define void @test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 3295; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: 3296; CHECK: # %bb.0: # %entry 3297; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 3298; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t 3299; CHECK-NEXT: ret 3300entry: 3301 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 3302 ret void 3303} 3304 3305declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i16>, i64, i64) 3306declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 3307 3308define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 3309; CHECK-LABEL: 
test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: 3310; CHECK: # %bb.0: # %entry 3311; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 3312; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 3313; CHECK-NEXT: ret 3314entry: 3315 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 3316 ret void 3317} 3318 3319define void @test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 3320; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: 3321; CHECK: # %bb.0: # %entry 3322; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 3323; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t 3324; CHECK-NEXT: ret 3325entry: 3326 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 3327 ret void 3328} 3329 3330declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i32>, i64, i64) 3331declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 3332 3333define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 3334; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: 3335; CHECK: # %bb.0: # %entry 3336; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 3337; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 3338; CHECK-NEXT: ret 3339entry: 3340 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 3341 ret void 3342} 3343 3344define void @test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 3345; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: 3346; CHECK: # %bb.0: # %entry 3347; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 3348; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t 3349; CHECK-NEXT: ret 3350entry: 3351 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 3352 ret void 3353} 3354 3355declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i64>, i64, i64) 3356declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 3357 3358define void @test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 3359; CHECK-LABEL: test_vsoxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: 3360; CHECK: # %bb.0: # %entry 3361; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, ma 3362; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 3363; CHECK-NEXT: ret 3364entry: 3365 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 3366 ret void 3367} 3368 3369define void @test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 3370; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: 3371; CHECK: # %bb.0: # %entry 3372; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 3373; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t 3374; CHECK-NEXT: ret 3375entry: 3376 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 3377 ret void 3378} 3379 3380declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 8 x i8>, i64, i64) 3381declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64) 3382 3383define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) { 3384; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: 3385; CHECK: # %bb.0: # %entry 3386; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3387; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 3388; CHECK-NEXT: ret 3389entry: 3390 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 4) 3391 ret void 3392} 3393 3394define void @test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) { 3395; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: 3396; CHECK: # %bb.0: # %entry 3397; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3398; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t 3399; CHECK-NEXT: ret 3400entry: 3401 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 3402 ret void 3403} 3404 3405declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 8 x i16>, i64, i64) 3406declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64) 3407 3408define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) { 3409; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: 3410; CHECK: # %bb.0: # %entry 3411; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3412; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 3413; CHECK-NEXT: ret 
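; NOTE: in the surrounding two-field <vscale x 16 x i8> tuple tests, the segment data
; occupies v8-v11 (two m2 fields at e16), so the index operand is assigned v12 for the
; ei8/ei16/ei32 forms and v16 once the ei64 index requires an m8 register group; the
; trailing i64 4 operand appears to encode log2 of the element width (4 -> e16),
; consistent with the e16 setting in the vsetvli CHECK lines.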
3414entry: 3415 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 4) 3416 ret void 3417} 3418 3419define void @test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) { 3420; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: 3421; CHECK: # %bb.0: # %entry 3422; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3423; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t 3424; CHECK-NEXT: ret 3425entry: 3426 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 3427 ret void 3428} 3429 3430declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 8 x i32>, i64, i64) 3431declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64) 3432 3433define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) { 3434; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: 3435; CHECK: # %bb.0: # %entry 3436; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3437; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 3438; CHECK-NEXT: ret 3439entry: 3440 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 4) 3441 ret void 3442} 3443 3444define void @test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 3445; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: 3446; CHECK: # %bb.0: # %entry 3447; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3448; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t 3449; CHECK-NEXT: ret 3450entry: 3451 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 3452 ret void 3453} 3454 3455declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 8 x i64>, i64, i64) 3456declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64) 3457 3458define void @test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 3459; CHECK-LABEL: test_vsoxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: 3460; CHECK: # %bb.0: # %entry 3461; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3462; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 3463; CHECK-NEXT: ret 3464entry: 3465 tail call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 4) 3466 ret void 3467} 3468 3469define void @test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 3470; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: 3471; CHECK: # %bb.0: # %entry 3472; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3473; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t 3474; CHECK-NEXT: ret 3475entry: 3476 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 3477 ret void 3478} 3479 3480declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 16 x i8>, i64, i64) 3481declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i64, i64) 3482 3483define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) { 3484; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: 3485; CHECK: # %bb.0: # %entry 3486; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma 3487; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 3488; CHECK-NEXT: ret 3489entry: 3490 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, i64 4) 3491 ret void 3492} 3493 3494define void @test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) { 3495; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: 3496; CHECK: # %bb.0: # %entry 3497; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma 3498; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t 3499; CHECK-NEXT: ret 3500entry: 3501 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 4) 3502 ret void 3503} 3504 3505declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 16 x i16>, i64, i64) 3506declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i64, i64) 3507 3508define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) { 3509; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: 3510; CHECK: # %bb.0: # %entry 3511; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma 3512; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 3513; CHECK-NEXT: ret 3514entry: 3515 tail call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, i64 4) 3516 ret void 3517} 3518 3519define void @test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) { 3520; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: 3521; CHECK: # %bb.0: # %entry 3522; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma 3523; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t 3524; CHECK-NEXT: ret 3525entry: 3526 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 4) 3527 ret void 3528} 3529 3530declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 16 x i32>, i64, i64) 3531declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i64, i64) 3532 3533define void @test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) { 3534; CHECK-LABEL: test_vsoxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: 3535; CHECK: # %bb.0: # %entry 3536; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma 3537; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 3538; CHECK-NEXT: ret 3539entry: 3540 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, i64 4) 3541 ret void 3542} 3543 3544define void @test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) { 3545; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: 3546; CHECK: # %bb.0: # %entry 3547; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma 3548; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t 3549; CHECK-NEXT: ret 3550entry: 3551 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 4) 3552 ret void 3553} 3554 3555declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i8>, i64, i64) 3556declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 3557 3558define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 3559; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: 3560; CHECK: # %bb.0: # %entry 3561; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3562; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 3563; CHECK-NEXT: ret 3564entry: 3565 tail call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 3566 ret void 3567} 3568 3569define void @test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 3570; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: 3571; CHECK: # %bb.0: # %entry 3572; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3573; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t 3574; CHECK-NEXT: ret 3575entry: 3576 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 3577 ret void 3578} 3579 3580declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i16>, i64, i64) 3581declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 3582 3583define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 3584; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: 3585; CHECK: # %bb.0: # %entry 3586; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3587; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 3588; CHECK-NEXT: ret 3589entry: 3590 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 3591 ret void 3592} 3593 3594define void @test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 3595; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: 3596; CHECK: # %bb.0: # %entry 3597; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3598; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t 3599; CHECK-NEXT: ret 3600entry: 3601 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 3602 ret void 3603} 3604 3605declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i32>, i64, i64) 3606declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 3607 3608define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 3609; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: 3610; CHECK: # %bb.0: # %entry 3611; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3612; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 3613; CHECK-NEXT: ret 3614entry: 3615 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) 
%val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 3616 ret void 3617} 3618 3619define void @test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 3620; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: 3621; CHECK: # %bb.0: # %entry 3622; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3623; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t 3624; CHECK-NEXT: ret 3625entry: 3626 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 3627 ret void 3628} 3629 3630declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i64>, i64, i64) 3631declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 3632 3633define void @test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 3634; CHECK-LABEL: test_vsoxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: 3635; CHECK: # %bb.0: # %entry 3636; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3637; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11 3638; CHECK-NEXT: ret 3639entry: 3640 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 3641 ret void 3642} 3643 3644define void @test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 3645; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: 3646; CHECK: # %bb.0: # %entry 3647; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3648; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11, v0.t 3649; CHECK-NEXT: ret 3650entry: 3651 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 3652 ret void 3653} 3654 3655declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i8>, i64, i64) 3656declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 3657 3658define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 3659; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: 3660; CHECK: # %bb.0: # %entry 3661; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3662; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 3663; CHECK-NEXT: ret 3664entry: 3665 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 3666 ret void 3667} 3668 3669define void 
@test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 3670; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: 3671; CHECK: # %bb.0: # %entry 3672; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3673; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t 3674; CHECK-NEXT: ret 3675entry: 3676 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 3677 ret void 3678} 3679 3680declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i16>, i64, i64) 3681declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 3682 3683define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 3684; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: 3685; CHECK: # %bb.0: # %entry 3686; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3687; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 3688; CHECK-NEXT: ret 3689entry: 3690 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 3691 ret void 3692} 3693 3694define void @test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 3695; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: 3696; CHECK: # %bb.0: # %entry 3697; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3698; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t 3699; CHECK-NEXT: ret 3700entry: 3701 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 3702 ret void 3703} 3704 3705declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i32>, i64, i64) 3706declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 3707 3708define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 3709; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: 3710; CHECK: # %bb.0: # %entry 3711; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3712; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 3713; CHECK-NEXT: ret 3714entry: 3715 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 3716 ret void 3717} 3718 3719define void @test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x 
i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 3720; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: 3721; CHECK: # %bb.0: # %entry 3722; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3723; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t 3724; CHECK-NEXT: ret 3725entry: 3726 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 3727 ret void 3728} 3729 3730declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i64>, i64, i64) 3731declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 3732 3733define void @test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 3734; CHECK-LABEL: test_vsoxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: 3735; CHECK: # %bb.0: # %entry 3736; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3737; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 3738; CHECK-NEXT: ret 3739entry: 3740 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 3741 ret void 3742} 3743 3744define void @test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 3745; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: 3746; CHECK: # %bb.0: # %entry 3747; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 3748; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t 3749; CHECK-NEXT: ret 3750entry: 3751 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 3752 ret void 3753} 3754 3755declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i8>, i64, i64) 3756declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 3757 3758define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 3759; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: 3760; CHECK: # %bb.0: # %entry 3761; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 3762; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 3763; CHECK-NEXT: ret 3764entry: 3765 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 3766 ret void 3767} 3768 3769define void @test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 3770; CHECK-LABEL: 
test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: 3771; CHECK: # %bb.0: # %entry 3772; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 3773; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t 3774; CHECK-NEXT: ret 3775entry: 3776 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 3777 ret void 3778} 3779 3780declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i16>, i64, i64) 3781declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 3782 3783define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 3784; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: 3785; CHECK: # %bb.0: # %entry 3786; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 3787; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 3788; CHECK-NEXT: ret 3789entry: 3790 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 3791 ret void 3792} 3793 3794define void @test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 3795; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: 3796; CHECK: # %bb.0: # %entry 3797; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 3798; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t 3799; CHECK-NEXT: ret 3800entry: 3801 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 3802 ret void 3803} 3804 3805declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i32>, i64, i64) 3806declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 3807 3808define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 3809; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: 3810; CHECK: # %bb.0: # %entry 3811; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 3812; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 3813; CHECK-NEXT: ret 3814entry: 3815 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 3816 ret void 3817} 3818 3819define void @test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 3820; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: 3821; CHECK: # %bb.0: # %entry 3822; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, ma 3823; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t 3824; CHECK-NEXT: ret 3825entry: 3826 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 3827 ret void 3828} 3829 3830declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i64>, i64, i64) 3831declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 3832 3833define void @test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 3834; CHECK-LABEL: test_vsoxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: 3835; CHECK: # %bb.0: # %entry 3836; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 3837; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 3838; CHECK-NEXT: ret 3839entry: 3840 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 3841 ret void 3842} 3843 3844define void @test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 3845; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: 3846; CHECK: # %bb.0: # %entry 3847; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 3848; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t 3849; CHECK-NEXT: ret 3850entry: 3851 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 3852 ret void 3853} 3854 3855declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i8>, i64, i64) 3856declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64) 3857 3858define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) { 3859; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: 3860; CHECK: # %bb.0: # %entry 3861; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3862; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 3863; CHECK-NEXT: ret 3864entry: 3865 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 4) 3866 ret void 3867} 3868 3869define void @test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) { 3870; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: 3871; CHECK: # %bb.0: # %entry 3872; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3873; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t 3874; CHECK-NEXT: ret 
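; How to read the vsoxseg checks in this block (an informal summary inferred
; from the calls and the autogenerated assertions below, not a normative
; statement of the intrinsic contract): the trailing `i64 4` operand is
; consistent with log2(SEW) of the stored segment elements, which is why every
; expected `vsetvli` in this block uses `e16`, with the AVL taken from %vl in
; a1 and the base pointer in a0; the element width of the %index vector (not
; of the stored data) selects the mnemonic suffix, giving
; vsoxseg3ei8/ei16/ei32/ei64 for i8/i16/i32/i64 indices; and the `.mask.`
; variants differ only by the extra <vscale x N x i1> operand, which is
; expected to surface as the trailing `, v0.t` on the store.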
3875entry: 3876 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 3877 ret void 3878} 3879 3880declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i16>, i64, i64) 3881declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64) 3882 3883define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) { 3884; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: 3885; CHECK: # %bb.0: # %entry 3886; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3887; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 3888; CHECK-NEXT: ret 3889entry: 3890 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 4) 3891 ret void 3892} 3893 3894define void @test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) { 3895; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: 3896; CHECK: # %bb.0: # %entry 3897; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3898; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t 3899; CHECK-NEXT: ret 3900entry: 3901 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 3902 ret void 3903} 3904 3905declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i32>, i64, i64) 3906declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64) 3907 3908define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) { 3909; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: 3910; CHECK: # %bb.0: # %entry 3911; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3912; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 3913; CHECK-NEXT: ret 3914entry: 3915 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 4) 3916 ret void 3917} 3918 3919define void @test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 3920; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: 3921; CHECK: # %bb.0: # %entry 3922; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3923; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t 3924; CHECK-NEXT: ret 3925entry: 3926 tail call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 3927 ret void 3928} 3929 3930declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i64>, i64, i64) 3931declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64) 3932 3933define void @test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 3934; CHECK-LABEL: test_vsoxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: 3935; CHECK: # %bb.0: # %entry 3936; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3937; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 3938; CHECK-NEXT: ret 3939entry: 3940 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 4) 3941 ret void 3942} 3943 3944define void @test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 3945; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: 3946; CHECK: # %bb.0: # %entry 3947; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 3948; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t 3949; CHECK-NEXT: ret 3950entry: 3951 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 3952 ret void 3953} 3954 3955declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i8>, i64, i64) 3956declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 3957 3958define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 3959; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: 3960; CHECK: # %bb.0: # %entry 3961; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3962; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 3963; CHECK-NEXT: ret 3964entry: 3965 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 3966 ret void 3967} 3968 3969define void @test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 3970; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: 3971; CHECK: # %bb.0: # %entry 3972; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3973; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 3974; CHECK-NEXT: ret 3975entry: 3976 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale 
x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 3977 ret void 3978} 3979 3980declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i16>, i64, i64) 3981declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 3982 3983define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 3984; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: 3985; CHECK: # %bb.0: # %entry 3986; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3987; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 3988; CHECK-NEXT: ret 3989entry: 3990 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 3991 ret void 3992} 3993 3994define void @test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 3995; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: 3996; CHECK: # %bb.0: # %entry 3997; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 3998; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 3999; CHECK-NEXT: ret 4000entry: 4001 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 4002 ret void 4003} 4004 4005declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i32>, i64, i64) 4006declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 4007 4008define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 4009; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: 4010; CHECK: # %bb.0: # %entry 4011; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4012; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 4013; CHECK-NEXT: ret 4014entry: 4015 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 4016 ret void 4017} 4018 4019define void @test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 4020; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: 4021; CHECK: # %bb.0: # %entry 4022; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4023; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 4024; CHECK-NEXT: ret 4025entry: 4026 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 4027 ret 
void 4028} 4029 4030declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i64>, i64, i64) 4031declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 4032 4033define void @test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 4034; CHECK-LABEL: test_vsoxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: 4035; CHECK: # %bb.0: # %entry 4036; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4037; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 4038; CHECK-NEXT: ret 4039entry: 4040 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 4041 ret void 4042} 4043 4044define void @test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 4045; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: 4046; CHECK: # %bb.0: # %entry 4047; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4048; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 4049; CHECK-NEXT: ret 4050entry: 4051 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 4052 ret void 4053} 4054 4055declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 2 x i8>, i64, i64) 4056declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 4057 4058define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 4059; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: 4060; CHECK: # %bb.0: # %entry 4061; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4062; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 4063; CHECK-NEXT: ret 4064entry: 4065 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 4066 ret void 4067} 4068 4069define void @test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 4070; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: 4071; CHECK: # %bb.0: # %entry 4072; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4073; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 4074; CHECK-NEXT: ret 4075entry: 4076 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 4077 ret void 4078} 4079 4080declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", 
<vscale x 4 x i8>, 4), ptr, <vscale x 2 x i16>, i64, i64) 4081declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 4082 4083define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 4084; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: 4085; CHECK: # %bb.0: # %entry 4086; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4087; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 4088; CHECK-NEXT: ret 4089entry: 4090 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 4091 ret void 4092} 4093 4094define void @test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 4095; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: 4096; CHECK: # %bb.0: # %entry 4097; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4098; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 4099; CHECK-NEXT: ret 4100entry: 4101 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 4102 ret void 4103} 4104 4105declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 2 x i32>, i64, i64) 4106declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 4107 4108define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 4109; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: 4110; CHECK: # %bb.0: # %entry 4111; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4112; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 4113; CHECK-NEXT: ret 4114entry: 4115 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 4116 ret void 4117} 4118 4119define void @test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 4120; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: 4121; CHECK: # %bb.0: # %entry 4122; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4123; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 4124; CHECK-NEXT: ret 4125entry: 4126 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 4127 ret void 4128} 4129 4130declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 2 x i64>, i64, i64) 4131declare void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 4132 4133define void @test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 4134; CHECK-LABEL: test_vsoxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: 4135; CHECK: # %bb.0: # %entry 4136; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4137; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 4138; CHECK-NEXT: ret 4139entry: 4140 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 4141 ret void 4142} 4143 4144define void @test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 4145; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: 4146; CHECK: # %bb.0: # %entry 4147; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4148; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 4149; CHECK-NEXT: ret 4150entry: 4151 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 4152 ret void 4153} 4154 4155declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i8>, i64, i64) 4156declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 4157 4158define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 4159; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: 4160; CHECK: # %bb.0: # %entry 4161; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4162; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 4163; CHECK-NEXT: ret 4164entry: 4165 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 4166 ret void 4167} 4168 4169define void @test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 4170; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: 4171; CHECK: # %bb.0: # %entry 4172; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4173; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 4174; CHECK-NEXT: ret 4175entry: 4176 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 4177 ret void 4178} 4179 4180declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i16>, i64, i64) 4181declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), 
ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 4182 4183define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 4184; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: 4185; CHECK: # %bb.0: # %entry 4186; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4187; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 4188; CHECK-NEXT: ret 4189entry: 4190 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 4191 ret void 4192} 4193 4194define void @test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 4195; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: 4196; CHECK: # %bb.0: # %entry 4197; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4198; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 4199; CHECK-NEXT: ret 4200entry: 4201 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 4202 ret void 4203} 4204 4205declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i32>, i64, i64) 4206declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 4207 4208define void @test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 4209; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: 4210; CHECK: # %bb.0: # %entry 4211; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4212; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 4213; CHECK-NEXT: ret 4214entry: 4215 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 4216 ret void 4217} 4218 4219define void @test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 4220; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: 4221; CHECK: # %bb.0: # %entry 4222; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4223; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 4224; CHECK-NEXT: ret 4225entry: 4226 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 4227 ret void 4228} 4229 4230declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i64>, i64, i64) 4231declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 4232 4233define void 
@test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 4234; CHECK-LABEL: test_vsoxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: 4235; CHECK: # %bb.0: # %entry 4236; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4237; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 4238; CHECK-NEXT: ret 4239entry: 4240 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 4241 ret void 4242} 4243 4244define void @test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 4245; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: 4246; CHECK: # %bb.0: # %entry 4247; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4248; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 4249; CHECK-NEXT: ret 4250entry: 4251 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 4252 ret void 4253} 4254 4255declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i8>, i64, i64) 4256declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64) 4257 4258define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) { 4259; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: 4260; CHECK: # %bb.0: # %entry 4261; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 4262; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 4263; CHECK-NEXT: ret 4264entry: 4265 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 4) 4266 ret void 4267} 4268 4269define void @test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) { 4270; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: 4271; CHECK: # %bb.0: # %entry 4272; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 4273; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t 4274; CHECK-NEXT: ret 4275entry: 4276 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 4277 ret void 4278} 4279 4280declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i16>, i64, i64) 4281declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64) 4282 4283define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x 
i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) { 4284; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: 4285; CHECK: # %bb.0: # %entry 4286; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 4287; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 4288; CHECK-NEXT: ret 4289entry: 4290 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 4) 4291 ret void 4292} 4293 4294define void @test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) { 4295; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: 4296; CHECK: # %bb.0: # %entry 4297; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 4298; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t 4299; CHECK-NEXT: ret 4300entry: 4301 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 4302 ret void 4303} 4304 4305declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i32>, i64, i64) 4306declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64) 4307 4308define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) { 4309; CHECK-LABEL: test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: 4310; CHECK: # %bb.0: # %entry 4311; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 4312; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 4313; CHECK-NEXT: ret 4314entry: 4315 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 4) 4316 ret void 4317} 4318 4319define void @test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 4320; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: 4321; CHECK: # %bb.0: # %entry 4322; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 4323; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t 4324; CHECK-NEXT: ret 4325entry: 4326 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 4327 ret void 4328} 4329 4330declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i64>, i64, i64) 4331declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64) 4332 4333define void @test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 4334; CHECK-LABEL: 
test_vsoxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: 4335; CHECK: # %bb.0: # %entry 4336; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 4337; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 4338; CHECK-NEXT: ret 4339entry: 4340 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 4) 4341 ret void 4342} 4343 4344define void @test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 4345; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: 4346; CHECK: # %bb.0: # %entry 4347; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 4348; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t 4349; CHECK-NEXT: ret 4350entry: 4351 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 4352 ret void 4353} 4354 4355declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i8>, i64, i64) 4356declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 4357 4358define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 4359; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: 4360; CHECK: # %bb.0: # %entry 4361; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4362; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 4363; CHECK-NEXT: ret 4364entry: 4365 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 4366 ret void 4367} 4368 4369define void @test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 4370; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: 4371; CHECK: # %bb.0: # %entry 4372; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4373; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 4374; CHECK-NEXT: ret 4375entry: 4376 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 4377 ret void 4378} 4379 4380declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i16>, i64, i64) 4381declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 4382 4383define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 4384; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: 4385; CHECK: # %bb.0: # %entry 4386; CHECK-NEXT: 
vsetvli zero, a1, e16, mf4, ta, ma 4387; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 4388; CHECK-NEXT: ret 4389entry: 4390 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 4391 ret void 4392} 4393 4394define void @test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 4395; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: 4396; CHECK: # %bb.0: # %entry 4397; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4398; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 4399; CHECK-NEXT: ret 4400entry: 4401 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 4402 ret void 4403} 4404 4405declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i32>, i64, i64) 4406declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 4407 4408define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 4409; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: 4410; CHECK: # %bb.0: # %entry 4411; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4412; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 4413; CHECK-NEXT: ret 4414entry: 4415 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 4416 ret void 4417} 4418 4419define void @test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 4420; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: 4421; CHECK: # %bb.0: # %entry 4422; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4423; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t 4424; CHECK-NEXT: ret 4425entry: 4426 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 4427 ret void 4428} 4429 4430declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i64>, i64, i64) 4431declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 4432 4433define void @test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 4434; CHECK-LABEL: test_vsoxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: 4435; CHECK: # %bb.0: # %entry 4436; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4437; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13 4438; CHECK-NEXT: ret 
4439entry: 4440 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 4441 ret void 4442} 4443 4444define void @test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 4445; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: 4446; CHECK: # %bb.0: # %entry 4447; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4448; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13, v0.t 4449; CHECK-NEXT: ret 4450entry: 4451 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 4452 ret void 4453} 4454 4455declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i8>, i64, i64) 4456declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 4457 4458define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 4459; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: 4460; CHECK: # %bb.0: # %entry 4461; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4462; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 4463; CHECK-NEXT: ret 4464entry: 4465 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 4466 ret void 4467} 4468 4469define void @test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 4470; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: 4471; CHECK: # %bb.0: # %entry 4472; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4473; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 4474; CHECK-NEXT: ret 4475entry: 4476 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 4477 ret void 4478} 4479 4480declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i16>, i64, i64) 4481declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 4482 4483define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 4484; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: 4485; CHECK: # %bb.0: # %entry 4486; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4487; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 4488; CHECK-NEXT: ret 4489entry: 4490 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", 
<vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 4491 ret void 4492} 4493 4494define void @test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 4495; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: 4496; CHECK: # %bb.0: # %entry 4497; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4498; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 4499; CHECK-NEXT: ret 4500entry: 4501 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 4502 ret void 4503} 4504 4505declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i32>, i64, i64) 4506declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 4507 4508define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 4509; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: 4510; CHECK: # %bb.0: # %entry 4511; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4512; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 4513; CHECK-NEXT: ret 4514entry: 4515 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 4516 ret void 4517} 4518 4519define void @test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 4520; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: 4521; CHECK: # %bb.0: # %entry 4522; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4523; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t 4524; CHECK-NEXT: ret 4525entry: 4526 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 4527 ret void 4528} 4529 4530declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i64>, i64, i64) 4531declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 4532 4533define void @test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 4534; CHECK-LABEL: test_vsoxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: 4535; CHECK: # %bb.0: # %entry 4536; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4537; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14 4538; CHECK-NEXT: ret 4539entry: 4540 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 4541 ret void 4542} 4543 
4544define void @test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 4545; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: 4546; CHECK: # %bb.0: # %entry 4547; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4548; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14, v0.t 4549; CHECK-NEXT: ret 4550entry: 4551 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 4552 ret void 4553} 4554 4555declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i8>, i64, i64) 4556declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 4557 4558define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 4559; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: 4560; CHECK: # %bb.0: # %entry 4561; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4562; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 4563; CHECK-NEXT: ret 4564entry: 4565 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 4566 ret void 4567} 4568 4569define void @test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 4570; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: 4571; CHECK: # %bb.0: # %entry 4572; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4573; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 4574; CHECK-NEXT: ret 4575entry: 4576 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 4577 ret void 4578} 4579 4580declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i16>, i64, i64) 4581declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 4582 4583define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 4584; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: 4585; CHECK: # %bb.0: # %entry 4586; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4587; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 4588; CHECK-NEXT: ret 4589entry: 4590 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 4591 ret void 4592} 4593 4594define void @test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x 
i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 4595; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: 4596; CHECK: # %bb.0: # %entry 4597; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4598; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 4599; CHECK-NEXT: ret 4600entry: 4601 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 4602 ret void 4603} 4604 4605declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i32>, i64, i64) 4606declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 4607 4608define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 4609; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: 4610; CHECK: # %bb.0: # %entry 4611; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4612; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14 4613; CHECK-NEXT: ret 4614entry: 4615 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 4616 ret void 4617} 4618 4619define void @test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 4620; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: 4621; CHECK: # %bb.0: # %entry 4622; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4623; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14, v0.t 4624; CHECK-NEXT: ret 4625entry: 4626 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 4627 ret void 4628} 4629 4630declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i64>, i64, i64) 4631declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 4632 4633define void @test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 4634; CHECK-LABEL: test_vsoxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: 4635; CHECK: # %bb.0: # %entry 4636; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4637; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 4638; CHECK-NEXT: ret 4639entry: 4640 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 4641 ret void 4642} 4643 4644define void @test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 4645; CHECK-LABEL: 
test_vsoxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: 4646; CHECK: # %bb.0: # %entry 4647; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4648; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t 4649; CHECK-NEXT: ret 4650entry: 4651 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 4652 ret void 4653} 4654 4655declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i8>, i64, i64) 4656declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 4657 4658define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 4659; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: 4660; CHECK: # %bb.0: # %entry 4661; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4662; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 4663; CHECK-NEXT: ret 4664entry: 4665 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 4666 ret void 4667} 4668 4669define void @test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 4670; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: 4671; CHECK: # %bb.0: # %entry 4672; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4673; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 4674; CHECK-NEXT: ret 4675entry: 4676 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 4677 ret void 4678} 4679 4680declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i16>, i64, i64) 4681declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 4682 4683define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 4684; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: 4685; CHECK: # %bb.0: # %entry 4686; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4687; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 4688; CHECK-NEXT: ret 4689entry: 4690 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 4691 ret void 4692} 4693 4694define void @test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 4695; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: 4696; CHECK: # %bb.0: # %entry 4697; CHECK-NEXT: vsetvli 
zero, a1, e16, mf4, ta, ma 4698; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 4699; CHECK-NEXT: ret 4700entry: 4701 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 4702 ret void 4703} 4704 4705declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i32>, i64, i64) 4706declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 4707 4708define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 4709; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: 4710; CHECK: # %bb.0: # %entry 4711; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4712; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 4713; CHECK-NEXT: ret 4714entry: 4715 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 4716 ret void 4717} 4718 4719define void @test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 4720; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: 4721; CHECK: # %bb.0: # %entry 4722; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4723; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 4724; CHECK-NEXT: ret 4725entry: 4726 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 4727 ret void 4728} 4729 4730declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i64>, i64, i64) 4731declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 4732 4733define void @test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 4734; CHECK-LABEL: test_vsoxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: 4735; CHECK: # %bb.0: # %entry 4736; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4737; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 4738; CHECK-NEXT: ret 4739entry: 4740 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 4741 ret void 4742} 4743 4744define void @test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 4745; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: 4746; CHECK: # %bb.0: # %entry 4747; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4748; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t 4749; CHECK-NEXT: ret 
4750entry: 4751 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 4752 ret void 4753} 4754 4755declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i8>, i64, i64) 4756declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 4757 4758define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 4759; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: 4760; CHECK: # %bb.0: # %entry 4761; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4762; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 4763; CHECK-NEXT: ret 4764entry: 4765 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 4766 ret void 4767} 4768 4769define void @test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 4770; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: 4771; CHECK: # %bb.0: # %entry 4772; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4773; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 4774; CHECK-NEXT: ret 4775entry: 4776 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 4777 ret void 4778} 4779 4780declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i16>, i64, i64) 4781declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 4782 4783define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 4784; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: 4785; CHECK: # %bb.0: # %entry 4786; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4787; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 4788; CHECK-NEXT: ret 4789entry: 4790 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 4791 ret void 4792} 4793 4794define void @test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 4795; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: 4796; CHECK: # %bb.0: # %entry 4797; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4798; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 4799; CHECK-NEXT: ret 4800entry: 4801 tail call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 4802 ret void 4803} 4804 4805declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i32>, i64, i64) 4806declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 4807 4808define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 4809; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: 4810; CHECK: # %bb.0: # %entry 4811; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4812; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 4813; CHECK-NEXT: ret 4814entry: 4815 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 4816 ret void 4817} 4818 4819define void @test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 4820; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: 4821; CHECK: # %bb.0: # %entry 4822; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4823; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 4824; CHECK-NEXT: ret 4825entry: 4826 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 4827 ret void 4828} 4829 4830declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i64>, i64, i64) 4831declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 4832 4833define void @test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 4834; CHECK-LABEL: test_vsoxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: 4835; CHECK: # %bb.0: # %entry 4836; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4837; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 4838; CHECK-NEXT: ret 4839entry: 4840 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 4841 ret void 4842} 4843 4844define void @test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 4845; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: 4846; CHECK: # %bb.0: # %entry 4847; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 4848; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t 4849; CHECK-NEXT: ret 4850entry: 4851 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", 
<vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 4852 ret void 4853} 4854 4855declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 4 x i8>, i64, i64) 4856declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 4857 4858define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 4859; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: 4860; CHECK: # %bb.0: # %entry 4861; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4862; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 4863; CHECK-NEXT: ret 4864entry: 4865 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 4866 ret void 4867} 4868 4869define void @test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 4870; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: 4871; CHECK: # %bb.0: # %entry 4872; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4873; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 4874; CHECK-NEXT: ret 4875entry: 4876 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 4877 ret void 4878} 4879 4880declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 4 x i16>, i64, i64) 4881declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 4882 4883define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 4884; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: 4885; CHECK: # %bb.0: # %entry 4886; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4887; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 4888; CHECK-NEXT: ret 4889entry: 4890 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 4891 ret void 4892} 4893 4894define void @test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 4895; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: 4896; CHECK: # %bb.0: # %entry 4897; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4898; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 4899; CHECK-NEXT: ret 4900entry: 4901 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 4902 ret void 4903} 
4904 4905declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 4 x i32>, i64, i64) 4906declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 4907 4908define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 4909; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: 4910; CHECK: # %bb.0: # %entry 4911; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4912; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 4913; CHECK-NEXT: ret 4914entry: 4915 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 4916 ret void 4917} 4918 4919define void @test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 4920; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: 4921; CHECK: # %bb.0: # %entry 4922; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4923; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 4924; CHECK-NEXT: ret 4925entry: 4926 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 4927 ret void 4928} 4929 4930declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 4 x i64>, i64, i64) 4931declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 4932 4933define void @test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 4934; CHECK-LABEL: test_vsoxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: 4935; CHECK: # %bb.0: # %entry 4936; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4937; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16 4938; CHECK-NEXT: ret 4939entry: 4940 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 4941 ret void 4942} 4943 4944define void @test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 4945; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: 4946; CHECK: # %bb.0: # %entry 4947; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 4948; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16, v0.t 4949; CHECK-NEXT: ret 4950entry: 4951 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 4952 ret void 4953} 4954 4955declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", 
<vscale x 2 x i8>, 7), ptr, <vscale x 1 x i8>, i64, i64) 4956declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 4957 4958define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 4959; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: 4960; CHECK: # %bb.0: # %entry 4961; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4962; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 4963; CHECK-NEXT: ret 4964entry: 4965 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 4966 ret void 4967} 4968 4969define void @test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 4970; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: 4971; CHECK: # %bb.0: # %entry 4972; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4973; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 4974; CHECK-NEXT: ret 4975entry: 4976 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 4977 ret void 4978} 4979 4980declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 1 x i16>, i64, i64) 4981declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 4982 4983define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 4984; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: 4985; CHECK: # %bb.0: # %entry 4986; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4987; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 4988; CHECK-NEXT: ret 4989entry: 4990 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 4991 ret void 4992} 4993 4994define void @test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 4995; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: 4996; CHECK: # %bb.0: # %entry 4997; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 4998; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t 4999; CHECK-NEXT: ret 5000entry: 5001 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 5002 ret void 5003} 5004 5005declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 1 x i32>, i64, i64) 5006declare void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 5007 5008define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 5009; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: 5010; CHECK: # %bb.0: # %entry 5011; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 5012; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 5013; CHECK-NEXT: ret 5014entry: 5015 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 5016 ret void 5017} 5018 5019define void @test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 5020; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: 5021; CHECK: # %bb.0: # %entry 5022; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 5023; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t 5024; CHECK-NEXT: ret 5025entry: 5026 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 5027 ret void 5028} 5029 5030declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 1 x i64>, i64, i64) 5031declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 5032 5033define void @test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 5034; CHECK-LABEL: test_vsoxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: 5035; CHECK: # %bb.0: # %entry 5036; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 5037; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15 5038; CHECK-NEXT: ret 5039entry: 5040 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 5041 ret void 5042} 5043 5044define void @test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 5045; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: 5046; CHECK: # %bb.0: # %entry 5047; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 5048; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15, v0.t 5049; CHECK-NEXT: ret 5050entry: 5051 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 5052 ret void 5053} 5054 5055declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 2 x i8>, i64, i64) 5056declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 
4 x i8>, 7), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 5057 5058define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 5059; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: 5060; CHECK: # %bb.0: # %entry 5061; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5062; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 5063; CHECK-NEXT: ret 5064entry: 5065 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 5066 ret void 5067} 5068 5069define void @test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 5070; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: 5071; CHECK: # %bb.0: # %entry 5072; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5073; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 5074; CHECK-NEXT: ret 5075entry: 5076 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 5077 ret void 5078} 5079 5080declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 2 x i16>, i64, i64) 5081declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 5082 5083define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 5084; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: 5085; CHECK: # %bb.0: # %entry 5086; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5087; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 5088; CHECK-NEXT: ret 5089entry: 5090 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 5091 ret void 5092} 5093 5094define void @test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 5095; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: 5096; CHECK: # %bb.0: # %entry 5097; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5098; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t 5099; CHECK-NEXT: ret 5100entry: 5101 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 5102 ret void 5103} 5104 5105declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 2 x i32>, i64, i64) 5106declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 5107 5108define void 
@test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 5109; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: 5110; CHECK: # %bb.0: # %entry 5111; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5112; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 5113; CHECK-NEXT: ret 5114entry: 5115 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 5116 ret void 5117} 5118 5119define void @test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 5120; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: 5121; CHECK: # %bb.0: # %entry 5122; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5123; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t 5124; CHECK-NEXT: ret 5125entry: 5126 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 5127 ret void 5128} 5129 5130declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 2 x i64>, i64, i64) 5131declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 5132 5133define void @test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 5134; CHECK-LABEL: test_vsoxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: 5135; CHECK: # %bb.0: # %entry 5136; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5137; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 5138; CHECK-NEXT: ret 5139entry: 5140 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 5141 ret void 5142} 5143 5144define void @test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 5145; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: 5146; CHECK: # %bb.0: # %entry 5147; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5148; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t 5149; CHECK-NEXT: ret 5150entry: 5151 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 5152 ret void 5153} 5154 5155declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 4 x i8>, i64, i64) 5156declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 5157 5158define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) 
%val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 5159; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: 5160; CHECK: # %bb.0: # %entry 5161; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5162; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 5163; CHECK-NEXT: ret 5164entry: 5165 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 5166 ret void 5167} 5168 5169define void @test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 5170; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: 5171; CHECK: # %bb.0: # %entry 5172; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5173; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 5174; CHECK-NEXT: ret 5175entry: 5176 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 5177 ret void 5178} 5179 5180declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 4 x i16>, i64, i64) 5181declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 5182 5183define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 5184; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: 5185; CHECK: # %bb.0: # %entry 5186; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5187; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 5188; CHECK-NEXT: ret 5189entry: 5190 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 5191 ret void 5192} 5193 5194define void @test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 5195; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: 5196; CHECK: # %bb.0: # %entry 5197; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5198; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t 5199; CHECK-NEXT: ret 5200entry: 5201 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 5202 ret void 5203} 5204 5205declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 4 x i32>, i64, i64) 5206declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 5207 5208define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 5209; CHECK-LABEL: 
test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: 5210; CHECK: # %bb.0: # %entry 5211; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5212; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16 5213; CHECK-NEXT: ret 5214entry: 5215 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 5216 ret void 5217} 5218 5219define void @test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 5220; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: 5221; CHECK: # %bb.0: # %entry 5222; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5223; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16, v0.t 5224; CHECK-NEXT: ret 5225entry: 5226 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 5227 ret void 5228} 5229 5230declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 4 x i64>, i64, i64) 5231declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 5232 5233define void @test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 5234; CHECK-LABEL: test_vsoxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: 5235; CHECK: # %bb.0: # %entry 5236; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5237; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 5238; CHECK-NEXT: ret 5239entry: 5240 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 5241 ret void 5242} 5243 5244define void @test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 5245; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: 5246; CHECK: # %bb.0: # %entry 5247; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5248; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t 5249; CHECK-NEXT: ret 5250entry: 5251 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 5252 ret void 5253} 5254 5255declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 1 x i8>, i64, i64) 5256declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 5257 5258define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 5259; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: 5260; CHECK: # %bb.0: # %entry 5261; CHECK-NEXT: vsetvli 
zero, a1, e16, mf4, ta, ma 5262; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 5263; CHECK-NEXT: ret 5264entry: 5265 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 5266 ret void 5267} 5268 5269define void @test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 5270; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: 5271; CHECK: # %bb.0: # %entry 5272; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 5273; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 5274; CHECK-NEXT: ret 5275entry: 5276 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 5277 ret void 5278} 5279 5280declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 1 x i16>, i64, i64) 5281declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 5282 5283define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 5284; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: 5285; CHECK: # %bb.0: # %entry 5286; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 5287; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 5288; CHECK-NEXT: ret 5289entry: 5290 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 5291 ret void 5292} 5293 5294define void @test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 5295; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: 5296; CHECK: # %bb.0: # %entry 5297; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 5298; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 5299; CHECK-NEXT: ret 5300entry: 5301 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 5302 ret void 5303} 5304 5305declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 1 x i32>, i64, i64) 5306declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 5307 5308define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 5309; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: 5310; CHECK: # %bb.0: # %entry 5311; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 5312; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 5313; CHECK-NEXT: ret 5314entry: 5315 
tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 5316 ret void 5317} 5318 5319define void @test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 5320; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: 5321; CHECK: # %bb.0: # %entry 5322; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 5323; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 5324; CHECK-NEXT: ret 5325entry: 5326 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 5327 ret void 5328} 5329 5330declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 1 x i64>, i64, i64) 5331declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 5332 5333define void @test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 5334; CHECK-LABEL: test_vsoxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: 5335; CHECK: # %bb.0: # %entry 5336; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 5337; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 5338; CHECK-NEXT: ret 5339entry: 5340 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 5341 ret void 5342} 5343 5344define void @test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 5345; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: 5346; CHECK: # %bb.0: # %entry 5347; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 5348; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 5349; CHECK-NEXT: ret 5350entry: 5351 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 5352 ret void 5353} 5354 5355declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 2 x i8>, i64, i64) 5356declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 5357 5358define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 5359; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: 5360; CHECK: # %bb.0: # %entry 5361; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5362; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 5363; CHECK-NEXT: ret 5364entry: 5365 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 
x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 5366 ret void 5367} 5368 5369define void @test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 5370; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: 5371; CHECK: # %bb.0: # %entry 5372; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5373; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 5374; CHECK-NEXT: ret 5375entry: 5376 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 5377 ret void 5378} 5379 5380declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 2 x i16>, i64, i64) 5381declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 5382 5383define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 5384; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: 5385; CHECK: # %bb.0: # %entry 5386; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5387; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 5388; CHECK-NEXT: ret 5389entry: 5390 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 5391 ret void 5392} 5393 5394define void @test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 5395; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: 5396; CHECK: # %bb.0: # %entry 5397; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5398; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 5399; CHECK-NEXT: ret 5400entry: 5401 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 5402 ret void 5403} 5404 5405declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 2 x i32>, i64, i64) 5406declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 5407 5408define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 5409; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: 5410; CHECK: # %bb.0: # %entry 5411; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5412; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 5413; CHECK-NEXT: ret 5414entry: 5415 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 5416 ret void 5417} 5418 5419define void 
@test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 5420; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: 5421; CHECK: # %bb.0: # %entry 5422; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5423; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 5424; CHECK-NEXT: ret 5425entry: 5426 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 5427 ret void 5428} 5429 5430declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 2 x i64>, i64, i64) 5431declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 5432 5433define void @test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 5434; CHECK-LABEL: test_vsoxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: 5435; CHECK: # %bb.0: # %entry 5436; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5437; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 5438; CHECK-NEXT: ret 5439entry: 5440 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 5441 ret void 5442} 5443 5444define void @test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 5445; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: 5446; CHECK: # %bb.0: # %entry 5447; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 5448; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 5449; CHECK-NEXT: ret 5450entry: 5451 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 5452 ret void 5453} 5454 5455declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 4 x i8>, i64, i64) 5456declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 5457 5458define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 5459; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: 5460; CHECK: # %bb.0: # %entry 5461; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5462; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 5463; CHECK-NEXT: ret 5464entry: 5465 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 5466 ret void 5467} 5468 5469define void @test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) 
%val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 5470; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: 5471; CHECK: # %bb.0: # %entry 5472; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5473; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 5474; CHECK-NEXT: ret 5475entry: 5476 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 5477 ret void 5478} 5479 5480declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 4 x i16>, i64, i64) 5481declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 5482 5483define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 5484; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: 5485; CHECK: # %bb.0: # %entry 5486; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5487; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 5488; CHECK-NEXT: ret 5489entry: 5490 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 5491 ret void 5492} 5493 5494define void @test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 5495; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: 5496; CHECK: # %bb.0: # %entry 5497; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5498; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 5499; CHECK-NEXT: ret 5500entry: 5501 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 5502 ret void 5503} 5504 5505declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 4 x i32>, i64, i64) 5506declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 5507 5508define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 5509; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: 5510; CHECK: # %bb.0: # %entry 5511; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5512; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 5513; CHECK-NEXT: ret 5514entry: 5515 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 5516 ret void 5517} 5518 5519define void @test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 5520; CHECK-LABEL: 
test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: 5521; CHECK: # %bb.0: # %entry 5522; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5523; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 5524; CHECK-NEXT: ret 5525entry: 5526 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 5527 ret void 5528} 5529 5530declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 4 x i64>, i64, i64) 5531declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 5532 5533define void @test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 5534; CHECK-LABEL: test_vsoxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: 5535; CHECK: # %bb.0: # %entry 5536; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5537; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 5538; CHECK-NEXT: ret 5539entry: 5540 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 5541 ret void 5542} 5543 5544define void @test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 5545; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: 5546; CHECK: # %bb.0: # %entry 5547; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 5548; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 5549; CHECK-NEXT: ret 5550entry: 5551 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 5552 ret void 5553} 5554 5555declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 1 x i8>, i64, i64) 5556declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 5557 5558define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 5559; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: 5560; CHECK: # %bb.0: # %entry 5561; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 5562; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 5563; CHECK-NEXT: ret 5564entry: 5565 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 5) 5566 ret void 5567} 5568 5569define void @test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 5570; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: 5571; CHECK: # %bb.0: # %entry 5572; CHECK-NEXT: vsetvli 
zero, a1, e32, mf2, ta, ma 5573; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t 5574; CHECK-NEXT: ret 5575entry: 5576 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 5577 ret void 5578} 5579 5580declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 1 x i16>, i64, i64) 5581declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 5582 5583define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 5584; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: 5585; CHECK: # %bb.0: # %entry 5586; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 5587; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 5588; CHECK-NEXT: ret 5589entry: 5590 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5) 5591 ret void 5592} 5593 5594define void @test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 5595; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16: 5596; CHECK: # %bb.0: # %entry 5597; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 5598; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t 5599; CHECK-NEXT: ret 5600entry: 5601 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 5602 ret void 5603} 5604 5605declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 1 x i32>, i64, i64) 5606declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 5607 5608define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 5609; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: 5610; CHECK: # %bb.0: # %entry 5611; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 5612; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 5613; CHECK-NEXT: ret 5614entry: 5615 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 5) 5616 ret void 5617} 5618 5619define void @test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 5620; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32: 5621; CHECK: # %bb.0: # %entry 5622; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 5623; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t 5624; CHECK-NEXT: ret 
5625entry: 5626 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 5627 ret void 5628} 5629 5630declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 1 x i64>, i64, i64) 5631declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 5632 5633define void @test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 5634; CHECK-LABEL: test_vsoxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: 5635; CHECK: # %bb.0: # %entry 5636; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 5637; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 5638; CHECK-NEXT: ret 5639entry: 5640 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 5) 5641 ret void 5642} 5643 5644define void @test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 5645; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i64: 5646; CHECK: # %bb.0: # %entry 5647; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 5648; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t 5649; CHECK-NEXT: ret 5650entry: 5651 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 5652 ret void 5653} 5654 5655declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i8>, i64, i64) 5656declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 5657 5658define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 5659; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: 5660; CHECK: # %bb.0: # %entry 5661; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 5662; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 5663; CHECK-NEXT: ret 5664entry: 5665 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 5) 5666 ret void 5667} 5668 5669define void @test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 5670; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8: 5671; CHECK: # %bb.0: # %entry 5672; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 5673; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t 5674; CHECK-NEXT: ret 5675entry: 5676 tail call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 5677 ret void 5678} 5679 5680declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i16>, i64, i64) 5681declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 5682 5683define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 5684; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: 5685; CHECK: # %bb.0: # %entry 5686; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 5687; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 5688; CHECK-NEXT: ret 5689entry: 5690 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 5) 5691 ret void 5692} 5693 5694define void @test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 5695; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16: 5696; CHECK: # %bb.0: # %entry 5697; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 5698; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t 5699; CHECK-NEXT: ret 5700entry: 5701 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 5702 ret void 5703} 5704 5705declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i32>, i64, i64) 5706declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 5707 5708define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 5709; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: 5710; CHECK: # %bb.0: # %entry 5711; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 5712; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 5713; CHECK-NEXT: ret 5714entry: 5715 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 5) 5716 ret void 5717} 5718 5719define void @test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 5720; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32: 5721; CHECK: # %bb.0: # %entry 5722; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 5723; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t 5724; CHECK-NEXT: ret 5725entry: 5726 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 
x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 5727 ret void 5728} 5729 5730declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i64>, i64, i64) 5731declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 5732 5733define void @test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 5734; CHECK-LABEL: test_vsoxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: 5735; CHECK: # %bb.0: # %entry 5736; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 5737; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 5738; CHECK-NEXT: ret 5739entry: 5740 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 5) 5741 ret void 5742} 5743 5744define void @test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 5745; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i64: 5746; CHECK: # %bb.0: # %entry 5747; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 5748; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t 5749; CHECK-NEXT: ret 5750entry: 5751 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 5752 ret void 5753} 5754 5755declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i8>, i64, i64) 5756declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 5757 5758define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 5759; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: 5760; CHECK: # %bb.0: # %entry 5761; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 5762; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 5763; CHECK-NEXT: ret 5764entry: 5765 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 5) 5766 ret void 5767} 5768 5769define void @test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 5770; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8: 5771; CHECK: # %bb.0: # %entry 5772; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 5773; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t 5774; CHECK-NEXT: ret 5775entry: 5776 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 5777 ret void 
5778} 5779 5780declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i16>, i64, i64) 5781declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 5782 5783define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 5784; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: 5785; CHECK: # %bb.0: # %entry 5786; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 5787; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 5788; CHECK-NEXT: ret 5789entry: 5790 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 5) 5791 ret void 5792} 5793 5794define void @test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 5795; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: 5796; CHECK: # %bb.0: # %entry 5797; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 5798; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t 5799; CHECK-NEXT: ret 5800entry: 5801 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 5802 ret void 5803} 5804 5805declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i32>, i64, i64) 5806declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 5807 5808define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 5809; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: 5810; CHECK: # %bb.0: # %entry 5811; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 5812; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 5813; CHECK-NEXT: ret 5814entry: 5815 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 5) 5816 ret void 5817} 5818 5819define void @test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 5820; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: 5821; CHECK: # %bb.0: # %entry 5822; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 5823; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t 5824; CHECK-NEXT: ret 5825entry: 5826 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 5827 ret void 5828} 5829 5830declare void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i64>, i64, i64) 5831declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 5832 5833define void @test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 5834; CHECK-LABEL: test_vsoxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: 5835; CHECK: # %bb.0: # %entry 5836; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 5837; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 5838; CHECK-NEXT: ret 5839entry: 5840 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 5) 5841 ret void 5842} 5843 5844define void @test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 5845; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: 5846; CHECK: # %bb.0: # %entry 5847; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 5848; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t 5849; CHECK-NEXT: ret 5850entry: 5851 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 5852 ret void 5853} 5854 5855declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 8 x i8>, i64, i64) 5856declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64) 5857 5858define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) { 5859; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: 5860; CHECK: # %bb.0: # %entry 5861; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 5862; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 5863; CHECK-NEXT: ret 5864entry: 5865 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 5) 5866 ret void 5867} 5868 5869define void @test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) { 5870; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: 5871; CHECK: # %bb.0: # %entry 5872; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 5873; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t 5874; CHECK-NEXT: ret 5875entry: 5876 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 5) 5877 ret void 5878} 5879 5880declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 
32 x i8>, 2), ptr, <vscale x 8 x i16>, i64, i64) 5881declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64) 5882 5883define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) { 5884; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: 5885; CHECK: # %bb.0: # %entry 5886; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 5887; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 5888; CHECK-NEXT: ret 5889entry: 5890 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 5) 5891 ret void 5892} 5893 5894define void @test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) { 5895; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: 5896; CHECK: # %bb.0: # %entry 5897; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 5898; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t 5899; CHECK-NEXT: ret 5900entry: 5901 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 5) 5902 ret void 5903} 5904 5905declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 8 x i32>, i64, i64) 5906declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64) 5907 5908define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) { 5909; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: 5910; CHECK: # %bb.0: # %entry 5911; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 5912; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 5913; CHECK-NEXT: ret 5914entry: 5915 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 5) 5916 ret void 5917} 5918 5919define void @test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 5920; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: 5921; CHECK: # %bb.0: # %entry 5922; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 5923; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t 5924; CHECK-NEXT: ret 5925entry: 5926 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 5) 5927 ret void 5928} 5929 5930declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 8 x i64>, i64, i64) 5931declare void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64) 5932 5933define void @test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 5934; CHECK-LABEL: test_vsoxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: 5935; CHECK: # %bb.0: # %entry 5936; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 5937; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 5938; CHECK-NEXT: ret 5939entry: 5940 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 5) 5941 ret void 5942} 5943 5944define void @test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 5945; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: 5946; CHECK: # %bb.0: # %entry 5947; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 5948; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t 5949; CHECK-NEXT: ret 5950entry: 5951 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 5) 5952 ret void 5953} 5954 5955declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 1 x i8>, i64, i64) 5956declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 5957 5958define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 5959; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: 5960; CHECK: # %bb.0: # %entry 5961; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 5962; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 5963; CHECK-NEXT: ret 5964entry: 5965 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 5) 5966 ret void 5967} 5968 5969define void @test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 5970; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: 5971; CHECK: # %bb.0: # %entry 5972; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 5973; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t 5974; CHECK-NEXT: ret 5975entry: 5976 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 5977 ret void 5978} 5979 5980declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 1 x i16>, i64, i64) 5981declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 
x i8>, 3), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 5982 5983define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 5984; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: 5985; CHECK: # %bb.0: # %entry 5986; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 5987; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 5988; CHECK-NEXT: ret 5989entry: 5990 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5) 5991 ret void 5992} 5993 5994define void @test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 5995; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: 5996; CHECK: # %bb.0: # %entry 5997; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 5998; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t 5999; CHECK-NEXT: ret 6000entry: 6001 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 6002 ret void 6003} 6004 6005declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 1 x i32>, i64, i64) 6006declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 6007 6008define void @test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 6009; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: 6010; CHECK: # %bb.0: # %entry 6011; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6012; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 6013; CHECK-NEXT: ret 6014entry: 6015 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 5) 6016 ret void 6017} 6018 6019define void @test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 6020; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: 6021; CHECK: # %bb.0: # %entry 6022; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6023; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t 6024; CHECK-NEXT: ret 6025entry: 6026 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 6027 ret void 6028} 6029 6030declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 1 x i64>, i64, i64) 6031declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 6032 6033define void 
@test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 6034; CHECK-LABEL: test_vsoxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: 6035; CHECK: # %bb.0: # %entry 6036; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6037; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11 6038; CHECK-NEXT: ret 6039entry: 6040 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 5) 6041 ret void 6042} 6043 6044define void @test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 6045; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: 6046; CHECK: # %bb.0: # %entry 6047; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6048; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11, v0.t 6049; CHECK-NEXT: ret 6050entry: 6051 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 6052 ret void 6053} 6054 6055declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 2 x i8>, i64, i64) 6056declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 6057 6058define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 6059; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: 6060; CHECK: # %bb.0: # %entry 6061; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6062; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 6063; CHECK-NEXT: ret 6064entry: 6065 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 5) 6066 ret void 6067} 6068 6069define void @test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 6070; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: 6071; CHECK: # %bb.0: # %entry 6072; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6073; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t 6074; CHECK-NEXT: ret 6075entry: 6076 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 6077 ret void 6078} 6079 6080declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 2 x i16>, i64, i64) 6081declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 6082 6083define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr 
%base, <vscale x 2 x i16> %index, i64 %vl) { 6084; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: 6085; CHECK: # %bb.0: # %entry 6086; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6087; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 6088; CHECK-NEXT: ret 6089entry: 6090 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 5) 6091 ret void 6092} 6093 6094define void @test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 6095; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: 6096; CHECK: # %bb.0: # %entry 6097; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6098; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t 6099; CHECK-NEXT: ret 6100entry: 6101 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 6102 ret void 6103} 6104 6105declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 2 x i32>, i64, i64) 6106declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 6107 6108define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 6109; CHECK-LABEL: test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: 6110; CHECK: # %bb.0: # %entry 6111; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6112; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 6113; CHECK-NEXT: ret 6114entry: 6115 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 5) 6116 ret void 6117} 6118 6119define void @test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 6120; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: 6121; CHECK: # %bb.0: # %entry 6122; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6123; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t 6124; CHECK-NEXT: ret 6125entry: 6126 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 6127 ret void 6128} 6129 6130declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 2 x i64>, i64, i64) 6131declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 6132 6133define void @test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 6134; CHECK-LABEL: 
test_vsoxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: 6135; CHECK: # %bb.0: # %entry 6136; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6137; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 6138; CHECK-NEXT: ret 6139entry: 6140 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 5) 6141 ret void 6142} 6143 6144define void @test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 6145; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: 6146; CHECK: # %bb.0: # %entry 6147; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6148; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t 6149; CHECK-NEXT: ret 6150entry: 6151 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 6152 ret void 6153} 6154 6155declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 4 x i8>, i64, i64) 6156declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 6157 6158define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 6159; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: 6160; CHECK: # %bb.0: # %entry 6161; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6162; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 6163; CHECK-NEXT: ret 6164entry: 6165 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 5) 6166 ret void 6167} 6168 6169define void @test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 6170; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: 6171; CHECK: # %bb.0: # %entry 6172; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6173; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t 6174; CHECK-NEXT: ret 6175entry: 6176 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 6177 ret void 6178} 6179 6180declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 4 x i16>, i64, i64) 6181declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 6182 6183define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 6184; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: 6185; CHECK: # %bb.0: # %entry 6186; 
CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6187; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 6188; CHECK-NEXT: ret 6189entry: 6190 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 5) 6191 ret void 6192} 6193 6194define void @test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 6195; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: 6196; CHECK: # %bb.0: # %entry 6197; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6198; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t 6199; CHECK-NEXT: ret 6200entry: 6201 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 6202 ret void 6203} 6204 6205declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 4 x i32>, i64, i64) 6206declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 6207 6208define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 6209; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: 6210; CHECK: # %bb.0: # %entry 6211; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6212; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14 6213; CHECK-NEXT: ret 6214entry: 6215 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 5) 6216 ret void 6217} 6218 6219define void @test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 6220; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: 6221; CHECK: # %bb.0: # %entry 6222; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6223; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14, v0.t 6224; CHECK-NEXT: ret 6225entry: 6226 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 6227 ret void 6228} 6229 6230declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 4 x i64>, i64, i64) 6231declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 6232 6233define void @test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 6234; CHECK-LABEL: test_vsoxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: 6235; CHECK: # %bb.0: # %entry 6236; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6237; CHECK-NEXT: vsoxseg3ei64.v v8, 
(a0), v16 6238; CHECK-NEXT: ret 6239entry: 6240 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 5) 6241 ret void 6242} 6243 6244define void @test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 6245; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: 6246; CHECK: # %bb.0: # %entry 6247; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6248; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t 6249; CHECK-NEXT: ret 6250entry: 6251 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 6252 ret void 6253} 6254 6255declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 1 x i8>, i64, i64) 6256declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 6257 6258define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 6259; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: 6260; CHECK: # %bb.0: # %entry 6261; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6262; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 6263; CHECK-NEXT: ret 6264entry: 6265 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 5) 6266 ret void 6267} 6268 6269define void @test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 6270; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: 6271; CHECK: # %bb.0: # %entry 6272; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6273; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 6274; CHECK-NEXT: ret 6275entry: 6276 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 6277 ret void 6278} 6279 6280declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 1 x i16>, i64, i64) 6281declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 6282 6283define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 6284; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: 6285; CHECK: # %bb.0: # %entry 6286; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6287; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 6288; CHECK-NEXT: ret 6289entry: 6290 tail call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5) 6291 ret void 6292} 6293 6294define void @test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 6295; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: 6296; CHECK: # %bb.0: # %entry 6297; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6298; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 6299; CHECK-NEXT: ret 6300entry: 6301 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 6302 ret void 6303} 6304 6305declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 1 x i32>, i64, i64) 6306declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 6307 6308define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 6309; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: 6310; CHECK: # %bb.0: # %entry 6311; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6312; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 6313; CHECK-NEXT: ret 6314entry: 6315 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 5) 6316 ret void 6317} 6318 6319define void @test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 6320; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: 6321; CHECK: # %bb.0: # %entry 6322; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6323; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 6324; CHECK-NEXT: ret 6325entry: 6326 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 6327 ret void 6328} 6329 6330declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 1 x i64>, i64, i64) 6331declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 6332 6333define void @test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 6334; CHECK-LABEL: test_vsoxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: 6335; CHECK: # %bb.0: # %entry 6336; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6337; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 6338; CHECK-NEXT: ret 6339entry: 6340 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 
4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 5) 6341 ret void 6342} 6343 6344define void @test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 6345; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: 6346; CHECK: # %bb.0: # %entry 6347; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6348; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 6349; CHECK-NEXT: ret 6350entry: 6351 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 6352 ret void 6353} 6354 6355declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 2 x i8>, i64, i64) 6356declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 6357 6358define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 6359; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: 6360; CHECK: # %bb.0: # %entry 6361; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6362; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 6363; CHECK-NEXT: ret 6364entry: 6365 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 5) 6366 ret void 6367} 6368 6369define void @test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 6370; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: 6371; CHECK: # %bb.0: # %entry 6372; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6373; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 6374; CHECK-NEXT: ret 6375entry: 6376 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 6377 ret void 6378} 6379 6380declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 2 x i16>, i64, i64) 6381declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 6382 6383define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 6384; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: 6385; CHECK: # %bb.0: # %entry 6386; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6387; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 6388; CHECK-NEXT: ret 6389entry: 6390 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 5) 6391 ret void 6392} 6393 6394define void 
@test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 6395; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: 6396; CHECK: # %bb.0: # %entry 6397; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6398; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 6399; CHECK-NEXT: ret 6400entry: 6401 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 6402 ret void 6403} 6404 6405declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 2 x i32>, i64, i64) 6406declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 6407 6408define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 6409; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: 6410; CHECK: # %bb.0: # %entry 6411; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6412; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 6413; CHECK-NEXT: ret 6414entry: 6415 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 5) 6416 ret void 6417} 6418 6419define void @test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 6420; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: 6421; CHECK: # %bb.0: # %entry 6422; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6423; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 6424; CHECK-NEXT: ret 6425entry: 6426 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 6427 ret void 6428} 6429 6430declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 2 x i64>, i64, i64) 6431declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 6432 6433define void @test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 6434; CHECK-LABEL: test_vsoxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: 6435; CHECK: # %bb.0: # %entry 6436; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6437; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 6438; CHECK-NEXT: ret 6439entry: 6440 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 5) 6441 ret void 6442} 6443 6444define void @test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x 
i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 6445; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: 6446; CHECK: # %bb.0: # %entry 6447; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6448; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 6449; CHECK-NEXT: ret 6450entry: 6451 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 6452 ret void 6453} 6454 6455declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 4 x i8>, i64, i64) 6456declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 6457 6458define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 6459; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: 6460; CHECK: # %bb.0: # %entry 6461; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6462; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 6463; CHECK-NEXT: ret 6464entry: 6465 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 5) 6466 ret void 6467} 6468 6469define void @test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 6470; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: 6471; CHECK: # %bb.0: # %entry 6472; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6473; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t 6474; CHECK-NEXT: ret 6475entry: 6476 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 6477 ret void 6478} 6479 6480declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 4 x i16>, i64, i64) 6481declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 6482 6483define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 6484; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: 6485; CHECK: # %bb.0: # %entry 6486; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6487; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 6488; CHECK-NEXT: ret 6489entry: 6490 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 5) 6491 ret void 6492} 6493 6494define void @test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 6495; 
CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: 6496; CHECK: # %bb.0: # %entry 6497; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6498; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t 6499; CHECK-NEXT: ret 6500entry: 6501 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 6502 ret void 6503} 6504 6505declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 4 x i32>, i64, i64) 6506declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 6507 6508define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 6509; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: 6510; CHECK: # %bb.0: # %entry 6511; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6512; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 6513; CHECK-NEXT: ret 6514entry: 6515 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 5) 6516 ret void 6517} 6518 6519define void @test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 6520; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: 6521; CHECK: # %bb.0: # %entry 6522; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6523; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t 6524; CHECK-NEXT: ret 6525entry: 6526 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 6527 ret void 6528} 6529 6530declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 4 x i64>, i64, i64) 6531declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 6532 6533define void @test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 6534; CHECK-LABEL: test_vsoxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: 6535; CHECK: # %bb.0: # %entry 6536; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6537; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 6538; CHECK-NEXT: ret 6539entry: 6540 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 5) 6541 ret void 6542} 6543 6544define void @test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 6545; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: 
6546; CHECK: # %bb.0: # %entry 6547; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 6548; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t 6549; CHECK-NEXT: ret 6550entry: 6551 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 6552 ret void 6553} 6554 6555declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i8>, i64, i64) 6556declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 6557 6558define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 6559; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: 6560; CHECK: # %bb.0: # %entry 6561; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6562; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 6563; CHECK-NEXT: ret 6564entry: 6565 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 5) 6566 ret void 6567} 6568 6569define void @test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 6570; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: 6571; CHECK: # %bb.0: # %entry 6572; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6573; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 6574; CHECK-NEXT: ret 6575entry: 6576 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 6577 ret void 6578} 6579 6580declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i16>, i64, i64) 6581declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 6582 6583define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 6584; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: 6585; CHECK: # %bb.0: # %entry 6586; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6587; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 6588; CHECK-NEXT: ret 6589entry: 6590 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5) 6591 ret void 6592} 6593 6594define void @test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 6595; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: 6596; CHECK: # %bb.0: # %entry 6597; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6598; CHECK-NEXT: vsoxseg5ei16.v v8, 
(a0), v13, v0.t 6599; CHECK-NEXT: ret 6600entry: 6601 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 6602 ret void 6603} 6604 6605declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i32>, i64, i64) 6606declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 6607 6608define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 6609; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: 6610; CHECK: # %bb.0: # %entry 6611; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6612; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 6613; CHECK-NEXT: ret 6614entry: 6615 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 5) 6616 ret void 6617} 6618 6619define void @test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 6620; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: 6621; CHECK: # %bb.0: # %entry 6622; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6623; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t 6624; CHECK-NEXT: ret 6625entry: 6626 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 6627 ret void 6628} 6629 6630declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i64>, i64, i64) 6631declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 6632 6633define void @test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 6634; CHECK-LABEL: test_vsoxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: 6635; CHECK: # %bb.0: # %entry 6636; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6637; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13 6638; CHECK-NEXT: ret 6639entry: 6640 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 5) 6641 ret void 6642} 6643 6644define void @test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 6645; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: 6646; CHECK: # %bb.0: # %entry 6647; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6648; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13, v0.t 6649; CHECK-NEXT: ret 6650entry: 6651 tail call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 6652 ret void 6653} 6654 6655declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 2 x i8>, i64, i64) 6656declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 6657 6658define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 6659; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: 6660; CHECK: # %bb.0: # %entry 6661; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6662; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 6663; CHECK-NEXT: ret 6664entry: 6665 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 5) 6666 ret void 6667} 6668 6669define void @test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 6670; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: 6671; CHECK: # %bb.0: # %entry 6672; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6673; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 6674; CHECK-NEXT: ret 6675entry: 6676 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 6677 ret void 6678} 6679 6680declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 2 x i16>, i64, i64) 6681declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 6682 6683define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 6684; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: 6685; CHECK: # %bb.0: # %entry 6686; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6687; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 6688; CHECK-NEXT: ret 6689entry: 6690 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 5) 6691 ret void 6692} 6693 6694define void @test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 6695; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: 6696; CHECK: # %bb.0: # %entry 6697; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6698; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 6699; CHECK-NEXT: ret 6700entry: 6701 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) 
%val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
  ret void
}

declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 2 x i32>, i64, i64)
declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)

define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxseg5ei32.v v8, (a0), v13
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxseg5ei32.v v8, (a0), v13, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
  ret void
}

declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 2 x i64>, i64, i64)
declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)

define void @test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxseg5ei64.v v8, (a0), v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxseg5ei64.v v8, (a0), v14, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
  ret void
}

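; NOTE: Hand-written reader summary of the operand-to-register pattern exercised by
; the vsoxseg<NF> tests above and below; this comment is not produced by
; utils/update_llc_test_checks.py and survives regeneration unchanged.
;   * The NF-field tuple operand always starts at v8 and takes NF consecutive register
;     groups (each field gets at least one whole register even when its LMUL < 1).
;   * The index operand is allocated after the tuple, which is why the second vector
;     register in the vsoxseg*ei*.v CHECK lines varies with NF and the index EEW
;     (v13/v14/v16, etc.).
;   * When a mask is passed, it lands in v0 and appears as the trailing v0.t.
;   * The final i64 constant of each intrinsic call encodes the element width of the
;     stored data as log2(SEW): the e8 tests at the top of the file pass 3, the e32
;     tests here pass 5, matching the SEW selected by the vsetvli in each CHECK block.
; For example, in @test_vsoxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i64
; above, the 5-field e32/m1 tuple occupies v8-v12, the e64 (LMUL=2) index vector is
; placed at v14, and the masked store is emitted as:
;   vsoxseg5ei64.v v8, (a0), v14, v0.t
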
6755declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 1 x i8>, i64, i64) 6756declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 6757 6758define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 6759; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: 6760; CHECK: # %bb.0: # %entry 6761; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6762; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 6763; CHECK-NEXT: ret 6764entry: 6765 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 5) 6766 ret void 6767} 6768 6769define void @test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 6770; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: 6771; CHECK: # %bb.0: # %entry 6772; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6773; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 6774; CHECK-NEXT: ret 6775entry: 6776 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 6777 ret void 6778} 6779 6780declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 1 x i16>, i64, i64) 6781declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 6782 6783define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 6784; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: 6785; CHECK: # %bb.0: # %entry 6786; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6787; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 6788; CHECK-NEXT: ret 6789entry: 6790 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5) 6791 ret void 6792} 6793 6794define void @test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 6795; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: 6796; CHECK: # %bb.0: # %entry 6797; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6798; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 6799; CHECK-NEXT: ret 6800entry: 6801 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 6802 ret void 6803} 6804 6805declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x 
i8>, 6), ptr, <vscale x 1 x i32>, i64, i64) 6806declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 6807 6808define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 6809; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: 6810; CHECK: # %bb.0: # %entry 6811; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6812; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 6813; CHECK-NEXT: ret 6814entry: 6815 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 5) 6816 ret void 6817} 6818 6819define void @test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 6820; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: 6821; CHECK: # %bb.0: # %entry 6822; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6823; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 6824; CHECK-NEXT: ret 6825entry: 6826 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 6827 ret void 6828} 6829 6830declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 1 x i64>, i64, i64) 6831declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 6832 6833define void @test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 6834; CHECK-LABEL: test_vsoxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: 6835; CHECK: # %bb.0: # %entry 6836; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6837; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 6838; CHECK-NEXT: ret 6839entry: 6840 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 5) 6841 ret void 6842} 6843 6844define void @test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 6845; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: 6846; CHECK: # %bb.0: # %entry 6847; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 6848; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t 6849; CHECK-NEXT: ret 6850entry: 6851 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 6852 ret void 6853} 6854 6855declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 2 x i8>, i64, i64) 6856declare void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 6857 6858define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 6859; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: 6860; CHECK: # %bb.0: # %entry 6861; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6862; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 6863; CHECK-NEXT: ret 6864entry: 6865 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 5) 6866 ret void 6867} 6868 6869define void @test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 6870; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: 6871; CHECK: # %bb.0: # %entry 6872; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6873; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 6874; CHECK-NEXT: ret 6875entry: 6876 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 6877 ret void 6878} 6879 6880declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 2 x i16>, i64, i64) 6881declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 6882 6883define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 6884; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: 6885; CHECK: # %bb.0: # %entry 6886; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6887; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 6888; CHECK-NEXT: ret 6889entry: 6890 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 5) 6891 ret void 6892} 6893 6894define void @test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 6895; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: 6896; CHECK: # %bb.0: # %entry 6897; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6898; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 6899; CHECK-NEXT: ret 6900entry: 6901 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 6902 ret void 6903} 6904 6905declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 2 x i32>, i64, i64) 6906declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), 
ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 6907 6908define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 6909; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: 6910; CHECK: # %bb.0: # %entry 6911; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6912; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 6913; CHECK-NEXT: ret 6914entry: 6915 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 5) 6916 ret void 6917} 6918 6919define void @test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 6920; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: 6921; CHECK: # %bb.0: # %entry 6922; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6923; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 6924; CHECK-NEXT: ret 6925entry: 6926 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 6927 ret void 6928} 6929 6930declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 2 x i64>, i64, i64) 6931declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 6932 6933define void @test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 6934; CHECK-LABEL: test_vsoxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: 6935; CHECK: # %bb.0: # %entry 6936; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6937; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 6938; CHECK-NEXT: ret 6939entry: 6940 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 5) 6941 ret void 6942} 6943 6944define void @test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 6945; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: 6946; CHECK: # %bb.0: # %entry 6947; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 6948; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t 6949; CHECK-NEXT: ret 6950entry: 6951 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 6952 ret void 6953} 6954 6955declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i8>, i64, i64) 6956declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 6957 6958define void 
@test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
  ret void
}

declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i16>, i64, i64)
declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)

define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
  ret void
}

declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i32>, i64, i64)
declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)

define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
  ret void
}

declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i64>, i64, i64)
declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)

define void @test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
  ret void
}

declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 2 x i8>, i64, i64)
declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)

define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
  ret void
}

declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 2 x i16>, i64, i64)
declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)

define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
  ret void
}

declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 2 x i32>, i64, i64)
declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)

define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli
zero, a1, e32, m1, ta, ma 7112; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 7113; CHECK-NEXT: ret 7114entry: 7115 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 5) 7116 ret void 7117} 7118 7119define void @test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7120; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: 7121; CHECK: # %bb.0: # %entry 7122; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 7123; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t 7124; CHECK-NEXT: ret 7125entry: 7126 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 7127 ret void 7128} 7129 7130declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 2 x i64>, i64, i64) 7131declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 7132 7133define void @test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 7134; CHECK-LABEL: test_vsoxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: 7135; CHECK: # %bb.0: # %entry 7136; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 7137; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 7138; CHECK-NEXT: ret 7139entry: 7140 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 5) 7141 ret void 7142} 7143 7144define void @test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7145; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: 7146; CHECK: # %bb.0: # %entry 7147; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 7148; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t 7149; CHECK-NEXT: ret 7150entry: 7151 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 7152 ret void 7153} 7154 7155declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i8>, i64, i64) 7156declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 7157 7158define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 7159; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: 7160; CHECK: # %bb.0: # %entry 7161; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 7162; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 7163; CHECK-NEXT: ret 7164entry: 7165 tail 
call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 5) 7166 ret void 7167} 7168 7169define void @test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7170; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: 7171; CHECK: # %bb.0: # %entry 7172; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 7173; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 7174; CHECK-NEXT: ret 7175entry: 7176 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 7177 ret void 7178} 7179 7180declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i16>, i64, i64) 7181declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 7182 7183define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 7184; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: 7185; CHECK: # %bb.0: # %entry 7186; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 7187; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 7188; CHECK-NEXT: ret 7189entry: 7190 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5) 7191 ret void 7192} 7193 7194define void @test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7195; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: 7196; CHECK: # %bb.0: # %entry 7197; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 7198; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 7199; CHECK-NEXT: ret 7200entry: 7201 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 7202 ret void 7203} 7204 7205declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i32>, i64, i64) 7206declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 7207 7208define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 7209; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: 7210; CHECK: # %bb.0: # %entry 7211; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 7212; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 7213; CHECK-NEXT: ret 7214entry: 7215 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x 
i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 5) 7216 ret void 7217} 7218 7219define void @test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7220; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: 7221; CHECK: # %bb.0: # %entry 7222; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 7223; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 7224; CHECK-NEXT: ret 7225entry: 7226 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 7227 ret void 7228} 7229 7230declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i64>, i64, i64) 7231declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 7232 7233define void @test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 7234; CHECK-LABEL: test_vsoxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: 7235; CHECK: # %bb.0: # %entry 7236; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 7237; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 7238; CHECK-NEXT: ret 7239entry: 7240 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 5) 7241 ret void 7242} 7243 7244define void @test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7245; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: 7246; CHECK: # %bb.0: # %entry 7247; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 7248; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 7249; CHECK-NEXT: ret 7250entry: 7251 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 7252 ret void 7253} 7254 7255declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i8>, i64, i64) 7256declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 7257 7258define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 7259; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: 7260; CHECK: # %bb.0: # %entry 7261; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 7262; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 7263; CHECK-NEXT: ret 7264entry: 7265 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 5) 7266 ret void 7267} 7268 7269define void 
@test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7270; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: 7271; CHECK: # %bb.0: # %entry 7272; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 7273; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 7274; CHECK-NEXT: ret 7275entry: 7276 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 7277 ret void 7278} 7279 7280declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i16>, i64, i64) 7281declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 7282 7283define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 7284; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: 7285; CHECK: # %bb.0: # %entry 7286; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 7287; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 7288; CHECK-NEXT: ret 7289entry: 7290 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 5) 7291 ret void 7292} 7293 7294define void @test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7295; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: 7296; CHECK: # %bb.0: # %entry 7297; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 7298; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 7299; CHECK-NEXT: ret 7300entry: 7301 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 7302 ret void 7303} 7304 7305declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i32>, i64, i64) 7306declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 7307 7308define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 7309; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: 7310; CHECK: # %bb.0: # %entry 7311; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 7312; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 7313; CHECK-NEXT: ret 7314entry: 7315 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 5) 7316 ret void 7317} 7318 7319define void @test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) 
%val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7320; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: 7321; CHECK: # %bb.0: # %entry 7322; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 7323; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 7324; CHECK-NEXT: ret 7325entry: 7326 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 7327 ret void 7328} 7329 7330declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i64>, i64, i64) 7331declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 7332 7333define void @test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 7334; CHECK-LABEL: test_vsoxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: 7335; CHECK: # %bb.0: # %entry 7336; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 7337; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 7338; CHECK-NEXT: ret 7339entry: 7340 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 5) 7341 ret void 7342} 7343 7344define void @test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7345; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: 7346; CHECK: # %bb.0: # %entry 7347; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 7348; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 7349; CHECK-NEXT: ret 7350entry: 7351 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 7352 ret void 7353} 7354 7355declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i8>, i64, i64) 7356declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 7357 7358define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 7359; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: 7360; CHECK: # %bb.0: # %entry 7361; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7362; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 7363; CHECK-NEXT: ret 7364entry: 7365 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 6) 7366 ret void 7367} 7368 7369define void @test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7370; CHECK-LABEL: 
test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: 7371; CHECK: # %bb.0: # %entry 7372; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7373; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t 7374; CHECK-NEXT: ret 7375entry: 7376 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 7377 ret void 7378} 7379 7380declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i16>, i64, i64) 7381declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 7382 7383define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 7384; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: 7385; CHECK: # %bb.0: # %entry 7386; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7387; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 7388; CHECK-NEXT: ret 7389entry: 7390 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 6) 7391 ret void 7392} 7393 7394define void @test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7395; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: 7396; CHECK: # %bb.0: # %entry 7397; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7398; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t 7399; CHECK-NEXT: ret 7400entry: 7401 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 7402 ret void 7403} 7404 7405declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i32>, i64, i64) 7406declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 7407 7408define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 7409; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: 7410; CHECK: # %bb.0: # %entry 7411; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7412; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 7413; CHECK-NEXT: ret 7414entry: 7415 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 6) 7416 ret void 7417} 7418 7419define void @test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7420; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: 7421; CHECK: # %bb.0: # %entry 7422; CHECK-NEXT: 
vsetvli zero, a1, e64, m1, ta, ma 7423; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t 7424; CHECK-NEXT: ret 7425entry: 7426 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 7427 ret void 7428} 7429 7430declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i64>, i64, i64) 7431declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 7432 7433define void @test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 7434; CHECK-LABEL: test_vsoxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: 7435; CHECK: # %bb.0: # %entry 7436; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7437; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 7438; CHECK-NEXT: ret 7439entry: 7440 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 6) 7441 ret void 7442} 7443 7444define void @test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7445; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: 7446; CHECK: # %bb.0: # %entry 7447; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7448; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t 7449; CHECK-NEXT: ret 7450entry: 7451 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 7452 ret void 7453} 7454 7455declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i8>, i64, i64) 7456declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 7457 7458define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 7459; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: 7460; CHECK: # %bb.0: # %entry 7461; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7462; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 7463; CHECK-NEXT: ret 7464entry: 7465 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 6) 7466 ret void 7467} 7468 7469define void @test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7470; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: 7471; CHECK: # %bb.0: # %entry 7472; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7473; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t 7474; CHECK-NEXT: ret 
7475entry: 7476 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 7477 ret void 7478} 7479 7480declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i16>, i64, i64) 7481declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 7482 7483define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 7484; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: 7485; CHECK: # %bb.0: # %entry 7486; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7487; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 7488; CHECK-NEXT: ret 7489entry: 7490 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 6) 7491 ret void 7492} 7493 7494define void @test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7495; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: 7496; CHECK: # %bb.0: # %entry 7497; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7498; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t 7499; CHECK-NEXT: ret 7500entry: 7501 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 7502 ret void 7503} 7504 7505declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i32>, i64, i64) 7506declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 7507 7508define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 7509; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: 7510; CHECK: # %bb.0: # %entry 7511; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7512; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 7513; CHECK-NEXT: ret 7514entry: 7515 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 6) 7516 ret void 7517} 7518 7519define void @test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7520; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: 7521; CHECK: # %bb.0: # %entry 7522; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7523; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t 7524; CHECK-NEXT: ret 7525entry: 7526 tail call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 7527 ret void 7528} 7529 7530declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i64>, i64, i64) 7531declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 7532 7533define void @test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 7534; CHECK-LABEL: test_vsoxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: 7535; CHECK: # %bb.0: # %entry 7536; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7537; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 7538; CHECK-NEXT: ret 7539entry: 7540 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 6) 7541 ret void 7542} 7543 7544define void @test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7545; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: 7546; CHECK: # %bb.0: # %entry 7547; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7548; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t 7549; CHECK-NEXT: ret 7550entry: 7551 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 7552 ret void 7553} 7554 7555declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i8>, i64, i64) 7556declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64) 7557 7558define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 7559; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: 7560; CHECK: # %bb.0: # %entry 7561; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 7562; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 7563; CHECK-NEXT: ret 7564entry: 7565 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 6) 7566 ret void 7567} 7568 7569define void @test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 7570; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: 7571; CHECK: # %bb.0: # %entry 7572; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 7573; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t 7574; CHECK-NEXT: ret 7575entry: 7576 tail call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 6) 7577 ret void 7578} 7579 7580declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i16>, i64, i64) 7581declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64) 7582 7583define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 7584; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: 7585; CHECK: # %bb.0: # %entry 7586; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 7587; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 7588; CHECK-NEXT: ret 7589entry: 7590 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 6) 7591 ret void 7592} 7593 7594define void @test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 7595; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: 7596; CHECK: # %bb.0: # %entry 7597; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 7598; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t 7599; CHECK-NEXT: ret 7600entry: 7601 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 6) 7602 ret void 7603} 7604 7605declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i32>, i64, i64) 7606declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64) 7607 7608define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 7609; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: 7610; CHECK: # %bb.0: # %entry 7611; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 7612; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 7613; CHECK-NEXT: ret 7614entry: 7615 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 6) 7616 ret void 7617} 7618 7619define void @test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 7620; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: 7621; CHECK: # %bb.0: # %entry 7622; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 7623; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t 7624; CHECK-NEXT: ret 7625entry: 7626 tail call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 6) 7627 ret void 7628} 7629 7630declare void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i64>, i64, i64) 7631declare void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64) 7632 7633define void @test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 7634; CHECK-LABEL: test_vsoxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: 7635; CHECK: # %bb.0: # %entry 7636; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 7637; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 7638; CHECK-NEXT: ret 7639entry: 7640 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 6) 7641 ret void 7642} 7643 7644define void @test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 7645; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: 7646; CHECK: # %bb.0: # %entry 7647; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 7648; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t 7649; CHECK-NEXT: ret 7650entry: 7651 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 6) 7652 ret void 7653} 7654 7655declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 1 x i8>, i64, i64) 7656declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 7657 7658define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 7659; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: 7660; CHECK: # %bb.0: # %entry 7661; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7662; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 7663; CHECK-NEXT: ret 7664entry: 7665 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 6) 7666 ret void 7667} 7668 7669define void @test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7670; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: 7671; CHECK: # %bb.0: # %entry 7672; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7673; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t 7674; CHECK-NEXT: ret 7675entry: 7676 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 
8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 7677 ret void 7678} 7679 7680declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 1 x i16>, i64, i64) 7681declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 7682 7683define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 7684; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: 7685; CHECK: # %bb.0: # %entry 7686; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7687; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 7688; CHECK-NEXT: ret 7689entry: 7690 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 6) 7691 ret void 7692} 7693 7694define void @test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7695; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16: 7696; CHECK: # %bb.0: # %entry 7697; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7698; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t 7699; CHECK-NEXT: ret 7700entry: 7701 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 7702 ret void 7703} 7704 7705declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 1 x i32>, i64, i64) 7706declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 7707 7708define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 7709; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: 7710; CHECK: # %bb.0: # %entry 7711; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7712; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 7713; CHECK-NEXT: ret 7714entry: 7715 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 6) 7716 ret void 7717} 7718 7719define void @test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7720; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32: 7721; CHECK: # %bb.0: # %entry 7722; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7723; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t 7724; CHECK-NEXT: ret 7725entry: 7726 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 7727 ret void 
7728} 7729 7730declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 1 x i64>, i64, i64) 7731declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 7732 7733define void @test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 7734; CHECK-LABEL: test_vsoxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: 7735; CHECK: # %bb.0: # %entry 7736; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7737; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11 7738; CHECK-NEXT: ret 7739entry: 7740 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 6) 7741 ret void 7742} 7743 7744define void @test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7745; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i64: 7746; CHECK: # %bb.0: # %entry 7747; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7748; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11, v0.t 7749; CHECK-NEXT: ret 7750entry: 7751 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 7752 ret void 7753} 7754 7755declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i8>, i64, i64) 7756declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 7757 7758define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 7759; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: 7760; CHECK: # %bb.0: # %entry 7761; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7762; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 7763; CHECK-NEXT: ret 7764entry: 7765 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 6) 7766 ret void 7767} 7768 7769define void @test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7770; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8: 7771; CHECK: # %bb.0: # %entry 7772; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7773; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t 7774; CHECK-NEXT: ret 7775entry: 7776 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 7777 ret void 7778} 7779 7780declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i16>, i64, i64) 7781declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 7782 7783define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 7784; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: 7785; CHECK: # %bb.0: # %entry 7786; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7787; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 7788; CHECK-NEXT: ret 7789entry: 7790 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 6) 7791 ret void 7792} 7793 7794define void @test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7795; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16: 7796; CHECK: # %bb.0: # %entry 7797; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7798; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t 7799; CHECK-NEXT: ret 7800entry: 7801 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 7802 ret void 7803} 7804 7805declare void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i32>, i64, i64) 7806declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 7807 7808define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 7809; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: 7810; CHECK: # %bb.0: # %entry 7811; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7812; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14 7813; CHECK-NEXT: ret 7814entry: 7815 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 6) 7816 ret void 7817} 7818 7819define void @test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7820; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32: 7821; CHECK: # %bb.0: # %entry 7822; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7823; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14, v0.t 7824; CHECK-NEXT: ret 7825entry: 7826 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 7827 ret void 7828} 7829 7830declare void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i64>, i64, i64) 7831declare void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 7832 7833define void @test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 7834; CHECK-LABEL: test_vsoxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: 7835; CHECK: # %bb.0: # %entry 7836; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7837; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v14 7838; CHECK-NEXT: ret 7839entry: 7840 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 6) 7841 ret void 7842} 7843 7844define void @test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7845; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i64: 7846; CHECK: # %bb.0: # %entry 7847; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7848; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v14, v0.t 7849; CHECK-NEXT: ret 7850entry: 7851 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 7852 ret void 7853} 7854 7855declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i8>, i64, i64) 7856declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 7857 7858define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 7859; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: 7860; CHECK: # %bb.0: # %entry 7861; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7862; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 7863; CHECK-NEXT: ret 7864entry: 7865 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 6) 7866 ret void 7867} 7868 7869define void @test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7870; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8: 7871; CHECK: # %bb.0: # %entry 7872; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7873; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 7874; CHECK-NEXT: ret 7875entry: 7876 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 7877 ret void 7878} 7879 7880declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), 
ptr, <vscale x 1 x i16>, i64, i64) 7881declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 7882 7883define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 7884; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: 7885; CHECK: # %bb.0: # %entry 7886; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7887; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 7888; CHECK-NEXT: ret 7889entry: 7890 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 6) 7891 ret void 7892} 7893 7894define void @test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7895; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16: 7896; CHECK: # %bb.0: # %entry 7897; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7898; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 7899; CHECK-NEXT: ret 7900entry: 7901 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 7902 ret void 7903} 7904 7905declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i32>, i64, i64) 7906declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 7907 7908define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 7909; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: 7910; CHECK: # %bb.0: # %entry 7911; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7912; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 7913; CHECK-NEXT: ret 7914entry: 7915 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 6) 7916 ret void 7917} 7918 7919define void @test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7920; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32: 7921; CHECK: # %bb.0: # %entry 7922; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7923; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 7924; CHECK-NEXT: ret 7925entry: 7926 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 7927 ret void 7928} 7929 7930declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i64>, i64, i64) 7931declare void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 7932 7933define void @test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 7934; CHECK-LABEL: test_vsoxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: 7935; CHECK: # %bb.0: # %entry 7936; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7937; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 7938; CHECK-NEXT: ret 7939entry: 7940 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 6) 7941 ret void 7942} 7943 7944define void @test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 7945; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i64: 7946; CHECK: # %bb.0: # %entry 7947; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 7948; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 7949; CHECK-NEXT: ret 7950entry: 7951 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 7952 ret void 7953} 7954 7955declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 2 x i8>, i64, i64) 7956declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64) 7957 7958define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 7959; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: 7960; CHECK: # %bb.0: # %entry 7961; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7962; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 7963; CHECK-NEXT: ret 7964entry: 7965 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 6) 7966 ret void 7967} 7968 7969define void @test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7970; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8: 7971; CHECK: # %bb.0: # %entry 7972; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7973; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t 7974; CHECK-NEXT: ret 7975entry: 7976 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 7977 ret void 7978} 7979 7980declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 2 x i16>, i64, i64) 7981declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 
16 x i8>, 4), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64) 7982 7983define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 7984; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: 7985; CHECK: # %bb.0: # %entry 7986; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7987; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 7988; CHECK-NEXT: ret 7989entry: 7990 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 6) 7991 ret void 7992} 7993 7994define void @test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 7995; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16: 7996; CHECK: # %bb.0: # %entry 7997; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 7998; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t 7999; CHECK-NEXT: ret 8000entry: 8001 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 8002 ret void 8003} 8004 8005declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 2 x i32>, i64, i64) 8006declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64) 8007 8008define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 8009; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: 8010; CHECK: # %bb.0: # %entry 8011; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 8012; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 8013; CHECK-NEXT: ret 8014entry: 8015 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 6) 8016 ret void 8017} 8018 8019define void @test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 8020; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32: 8021; CHECK: # %bb.0: # %entry 8022; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 8023; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t 8024; CHECK-NEXT: ret 8025entry: 8026 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 8027 ret void 8028} 8029 8030declare void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 2 x i64>, i64, i64) 8031declare void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64) 8032 
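; A brief orienting note (hand-written summary of the pattern visible in the
; autogenerated checks, not itself generated): in the vsoxseg4 tests below the
; segment tuple being stored starts at v8 and spans NF * LMUL registers
; (v8-v15 for this NF=4, LMUL=2 tuple), the base address comes from a0, the
; index vector is placed in the next available register group (v16 in the
; CHECK lines), and the masked variants add v0.t. The trailing i64 immediate
; on each intrinsic call follows log2(SEW) of the element type selected by the
; emitted vsetvli, as seen throughout the file (3 for e8, 4 for e16, 6 for e64).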
8033define void @test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 8034; CHECK-LABEL: test_vsoxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: 8035; CHECK: # %bb.0: # %entry 8036; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 8037; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 8038; CHECK-NEXT: ret 8039entry: 8040 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 6) 8041 ret void 8042} 8043 8044define void @test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 8045; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i64: 8046; CHECK: # %bb.0: # %entry 8047; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 8048; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t 8049; CHECK-NEXT: ret 8050entry: 8051 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 8052 ret void 8053} 8054 8055declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 1 x i8>, i64, i64) 8056declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 8057 8058define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 8059; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: 8060; CHECK: # %bb.0: # %entry 8061; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8062; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 8063; CHECK-NEXT: ret 8064entry: 8065 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 6) 8066 ret void 8067} 8068 8069define void @test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8070; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8: 8071; CHECK: # %bb.0: # %entry 8072; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8073; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 8074; CHECK-NEXT: ret 8075entry: 8076 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8077 ret void 8078} 8079 8080declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 1 x i16>, i64, i64) 8081declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 8082 8083define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 
8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 8084; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: 8085; CHECK: # %bb.0: # %entry 8086; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8087; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 8088; CHECK-NEXT: ret 8089entry: 8090 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 6) 8091 ret void 8092} 8093 8094define void @test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8095; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16: 8096; CHECK: # %bb.0: # %entry 8097; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8098; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 8099; CHECK-NEXT: ret 8100entry: 8101 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8102 ret void 8103} 8104 8105declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 1 x i32>, i64, i64) 8106declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 8107 8108define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 8109; CHECK-LABEL: test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: 8110; CHECK: # %bb.0: # %entry 8111; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8112; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 8113; CHECK-NEXT: ret 8114entry: 8115 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 6) 8116 ret void 8117} 8118 8119define void @test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8120; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32: 8121; CHECK: # %bb.0: # %entry 8122; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8123; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t 8124; CHECK-NEXT: ret 8125entry: 8126 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8127 ret void 8128} 8129 8130declare void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 1 x i64>, i64, i64) 8131declare void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 8132 8133define void @test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 8134; CHECK-LABEL: 
test_vsoxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: 8135; CHECK: # %bb.0: # %entry 8136; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8137; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13 8138; CHECK-NEXT: ret 8139entry: 8140 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 6) 8141 ret void 8142} 8143 8144define void @test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8145; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i64: 8146; CHECK: # %bb.0: # %entry 8147; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8148; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13, v0.t 8149; CHECK-NEXT: ret 8150entry: 8151 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8152 ret void 8153} 8154 8155declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 1 x i8>, i64, i64) 8156declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 8157 8158define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 8159; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: 8160; CHECK: # %bb.0: # %entry 8161; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8162; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 8163; CHECK-NEXT: ret 8164entry: 8165 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 6) 8166 ret void 8167} 8168 8169define void @test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8170; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8: 8171; CHECK: # %bb.0: # %entry 8172; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8173; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 8174; CHECK-NEXT: ret 8175entry: 8176 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8177 ret void 8178} 8179 8180declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 1 x i16>, i64, i64) 8181declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 8182 8183define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 8184; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: 8185; CHECK: # %bb.0: # %entry 8186; CHECK-NEXT: vsetvli zero, 
a1, e64, m1, ta, ma 8187; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 8188; CHECK-NEXT: ret 8189entry: 8190 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 6) 8191 ret void 8192} 8193 8194define void @test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8195; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16: 8196; CHECK: # %bb.0: # %entry 8197; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8198; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 8199; CHECK-NEXT: ret 8200entry: 8201 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8202 ret void 8203} 8204 8205declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 1 x i32>, i64, i64) 8206declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 8207 8208define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 8209; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: 8210; CHECK: # %bb.0: # %entry 8211; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8212; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 8213; CHECK-NEXT: ret 8214entry: 8215 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 6) 8216 ret void 8217} 8218 8219define void @test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8220; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32: 8221; CHECK: # %bb.0: # %entry 8222; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8223; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 8224; CHECK-NEXT: ret 8225entry: 8226 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8227 ret void 8228} 8229 8230declare void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 1 x i64>, i64, i64) 8231declare void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 8232 8233define void @test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 8234; CHECK-LABEL: test_vsoxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: 8235; CHECK: # %bb.0: # %entry 8236; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8237; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 8238; CHECK-NEXT: ret 8239entry: 8240 tail 
call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 6) 8241 ret void 8242} 8243 8244define void @test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8245; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i64: 8246; CHECK: # %bb.0: # %entry 8247; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8248; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t 8249; CHECK-NEXT: ret 8250entry: 8251 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8252 ret void 8253} 8254 8255declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i8>, i64, i64) 8256declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 8257 8258define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 8259; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: 8260; CHECK: # %bb.0: # %entry 8261; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8262; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 8263; CHECK-NEXT: ret 8264entry: 8265 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 6) 8266 ret void 8267} 8268 8269define void @test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8270; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8: 8271; CHECK: # %bb.0: # %entry 8272; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8273; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 8274; CHECK-NEXT: ret 8275entry: 8276 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8277 ret void 8278} 8279 8280declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i16>, i64, i64) 8281declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 8282 8283define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 8284; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: 8285; CHECK: # %bb.0: # %entry 8286; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8287; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 8288; CHECK-NEXT: ret 8289entry: 8290 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, 
ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 6) 8291 ret void 8292} 8293 8294define void @test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8295; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16: 8296; CHECK: # %bb.0: # %entry 8297; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8298; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t 8299; CHECK-NEXT: ret 8300entry: 8301 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8302 ret void 8303} 8304 8305declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i32>, i64, i64) 8306declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 8307 8308define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 8309; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: 8310; CHECK: # %bb.0: # %entry 8311; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8312; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 8313; CHECK-NEXT: ret 8314entry: 8315 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 6) 8316 ret void 8317} 8318 8319define void @test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8320; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32: 8321; CHECK: # %bb.0: # %entry 8322; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8323; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t 8324; CHECK-NEXT: ret 8325entry: 8326 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8327 ret void 8328} 8329 8330declare void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i64>, i64, i64) 8331declare void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 8332 8333define void @test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 8334; CHECK-LABEL: test_vsoxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: 8335; CHECK: # %bb.0: # %entry 8336; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8337; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15 8338; CHECK-NEXT: ret 8339entry: 8340 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 6) 8341 ret void 8342} 8343 8344define void 
@test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8345; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i64: 8346; CHECK: # %bb.0: # %entry 8347; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8348; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15, v0.t 8349; CHECK-NEXT: ret 8350entry: 8351 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8352 ret void 8353} 8354 8355declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i8>, i64, i64) 8356declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64) 8357 8358define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 8359; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: 8360; CHECK: # %bb.0: # %entry 8361; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8362; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 8363; CHECK-NEXT: ret 8364entry: 8365 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 6) 8366 ret void 8367} 8368 8369define void @test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8370; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8: 8371; CHECK: # %bb.0: # %entry 8372; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8373; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 8374; CHECK-NEXT: ret 8375entry: 8376 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8377 ret void 8378} 8379 8380declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i16>, i64, i64) 8381declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64) 8382 8383define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 8384; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: 8385; CHECK: # %bb.0: # %entry 8386; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8387; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 8388; CHECK-NEXT: ret 8389entry: 8390 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 6) 8391 ret void 8392} 8393 8394define void @test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr 
%base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8395; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16: 8396; CHECK: # %bb.0: # %entry 8397; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8398; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 8399; CHECK-NEXT: ret 8400entry: 8401 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8402 ret void 8403} 8404 8405declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i32>, i64, i64) 8406declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64) 8407 8408define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 8409; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: 8410; CHECK: # %bb.0: # %entry 8411; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8412; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 8413; CHECK-NEXT: ret 8414entry: 8415 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 6) 8416 ret void 8417} 8418 8419define void @test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8420; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32: 8421; CHECK: # %bb.0: # %entry 8422; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8423; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 8424; CHECK-NEXT: ret 8425entry: 8426 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8427 ret void 8428} 8429 8430declare void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i64>, i64, i64) 8431declare void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64) 8432 8433define void @test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 8434; CHECK-LABEL: test_vsoxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: 8435; CHECK: # %bb.0: # %entry 8436; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8437; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 8438; CHECK-NEXT: ret 8439entry: 8440 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 6) 8441 ret void 8442} 8443 8444define void @test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8445; CHECK-LABEL: 
test_vsoxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i64: 8446; CHECK: # %bb.0: # %entry 8447; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 8448; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 8449; CHECK-NEXT: ret 8450entry: 8451 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 8452 ret void 8453} 8454 8455 8456define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 8457; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: 8458; CHECK: # %bb.0: # %entry 8459; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 8460; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 8461; CHECK-NEXT: ret 8462entry: 8463 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 8464 ret void 8465} 8466 8467define void @test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8468; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8: 8469; CHECK: # %bb.0: # %entry 8470; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 8471; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t 8472; CHECK-NEXT: ret 8473entry: 8474 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 8475 ret void 8476} 8477 8478 8479define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 8480; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: 8481; CHECK: # %bb.0: # %entry 8482; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 8483; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 8484; CHECK-NEXT: ret 8485entry: 8486 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 8487 ret void 8488} 8489 8490define void @test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8491; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16: 8492; CHECK: # %bb.0: # %entry 8493; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 8494; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t 8495; CHECK-NEXT: ret 8496entry: 8497 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 8498 ret void 8499} 8500 8501 8502define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 8503; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: 8504; CHECK: # %bb.0: # %entry 8505; CHECK-NEXT: vsetvli zero, a1, e16, mf4, 
ta, ma 8506; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 8507; CHECK-NEXT: ret 8508entry: 8509 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 8510 ret void 8511} 8512 8513define void @test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8514; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32: 8515; CHECK: # %bb.0: # %entry 8516; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 8517; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t 8518; CHECK-NEXT: ret 8519entry: 8520 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 8521 ret void 8522} 8523 8524 8525define void @test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 8526; CHECK-LABEL: test_vsoxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: 8527; CHECK: # %bb.0: # %entry 8528; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 8529; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 8530; CHECK-NEXT: ret 8531entry: 8532 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 8533 ret void 8534} 8535 8536define void @test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8537; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i64: 8538; CHECK: # %bb.0: # %entry 8539; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 8540; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t 8541; CHECK-NEXT: ret 8542entry: 8543 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 8544 ret void 8545} 8546 8547 8548define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 8549; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: 8550; CHECK: # %bb.0: # %entry 8551; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 8552; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 8553; CHECK-NEXT: ret 8554entry: 8555 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 8556 ret void 8557} 8558 8559define void @test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 8560; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8: 8561; CHECK: # %bb.0: # %entry 8562; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 8563; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t 8564; CHECK-NEXT: ret 8565entry: 8566 tail call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 8567 ret void 8568} 8569 8570 8571define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 8572; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: 8573; CHECK: # %bb.0: # %entry 8574; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 8575; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 8576; CHECK-NEXT: ret 8577entry: 8578 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 8579 ret void 8580} 8581 8582define void @test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 8583; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16: 8584; CHECK: # %bb.0: # %entry 8585; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 8586; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t 8587; CHECK-NEXT: ret 8588entry: 8589 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 8590 ret void 8591} 8592 8593 8594define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 8595; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: 8596; CHECK: # %bb.0: # %entry 8597; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 8598; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 8599; CHECK-NEXT: ret 8600entry: 8601 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 8602 ret void 8603} 8604 8605define void @test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 8606; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32: 8607; CHECK: # %bb.0: # %entry 8608; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 8609; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t 8610; CHECK-NEXT: ret 8611entry: 8612 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 8613 ret void 8614} 8615 8616 8617define void @test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 8618; CHECK-LABEL: test_vsoxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: 8619; CHECK: # %bb.0: # %entry 8620; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 8621; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 8622; CHECK-NEXT: ret 8623entry: 8624 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 
x i64> %index, i64 %vl, i64 4) 8625 ret void 8626} 8627 8628define void @test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 8629; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i64: 8630; CHECK: # %bb.0: # %entry 8631; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 8632; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t 8633; CHECK-NEXT: ret 8634entry: 8635 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 8636 ret void 8637} 8638 8639 8640define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 8641; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: 8642; CHECK: # %bb.0: # %entry 8643; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 8644; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 8645; CHECK-NEXT: ret 8646entry: 8647 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 8648 ret void 8649} 8650 8651define void @test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 8652; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8: 8653; CHECK: # %bb.0: # %entry 8654; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 8655; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t 8656; CHECK-NEXT: ret 8657entry: 8658 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 8659 ret void 8660} 8661 8662 8663define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 8664; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: 8665; CHECK: # %bb.0: # %entry 8666; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 8667; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 8668; CHECK-NEXT: ret 8669entry: 8670 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 8671 ret void 8672} 8673 8674define void @test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 8675; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16: 8676; CHECK: # %bb.0: # %entry 8677; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 8678; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t 8679; CHECK-NEXT: ret 8680entry: 8681 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 8682 ret void 8683} 8684 8685 8686define void 
@test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 8687; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: 8688; CHECK: # %bb.0: # %entry 8689; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 8690; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 8691; CHECK-NEXT: ret 8692entry: 8693 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 8694 ret void 8695} 8696 8697define void @test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 8698; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32: 8699; CHECK: # %bb.0: # %entry 8700; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 8701; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t 8702; CHECK-NEXT: ret 8703entry: 8704 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 8705 ret void 8706} 8707 8708 8709define void @test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 8710; CHECK-LABEL: test_vsoxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: 8711; CHECK: # %bb.0: # %entry 8712; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 8713; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 8714; CHECK-NEXT: ret 8715entry: 8716 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 8717 ret void 8718} 8719 8720define void @test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 8721; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i64: 8722; CHECK: # %bb.0: # %entry 8723; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 8724; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t 8725; CHECK-NEXT: ret 8726entry: 8727 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 8728 ret void 8729} 8730 8731 8732define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) { 8733; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: 8734; CHECK: # %bb.0: # %entry 8735; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 8736; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 8737; CHECK-NEXT: ret 8738entry: 8739 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 4) 8740 ret void 8741} 8742 8743define void @test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x 
i1> %mask) { 8744; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8: 8745; CHECK: # %bb.0: # %entry 8746; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 8747; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t 8748; CHECK-NEXT: ret 8749entry: 8750 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 8751 ret void 8752} 8753 8754 8755define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) { 8756; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: 8757; CHECK: # %bb.0: # %entry 8758; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 8759; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 8760; CHECK-NEXT: ret 8761entry: 8762 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 4) 8763 ret void 8764} 8765 8766define void @test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) { 8767; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16: 8768; CHECK: # %bb.0: # %entry 8769; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 8770; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t 8771; CHECK-NEXT: ret 8772entry: 8773 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 8774 ret void 8775} 8776 8777 8778define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) { 8779; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: 8780; CHECK: # %bb.0: # %entry 8781; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 8782; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 8783; CHECK-NEXT: ret 8784entry: 8785 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 4) 8786 ret void 8787} 8788 8789define void @test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 8790; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32: 8791; CHECK: # %bb.0: # %entry 8792; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 8793; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t 8794; CHECK-NEXT: ret 8795entry: 8796 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 8797 ret void 8798} 8799 8800 8801define void @test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 8802; CHECK-LABEL: test_vsoxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: 8803; CHECK: # 
%bb.0: # %entry 8804; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 8805; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 8806; CHECK-NEXT: ret 8807entry: 8808 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 4) 8809 ret void 8810} 8811 8812define void @test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 8813; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i64: 8814; CHECK: # %bb.0: # %entry 8815; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 8816; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t 8817; CHECK-NEXT: ret 8818entry: 8819 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 8820 ret void 8821} 8822 8823 8824define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) { 8825; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: 8826; CHECK: # %bb.0: # %entry 8827; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma 8828; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 8829; CHECK-NEXT: ret 8830entry: 8831 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, i64 4) 8832 ret void 8833} 8834 8835define void @test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) { 8836; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8: 8837; CHECK: # %bb.0: # %entry 8838; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma 8839; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t 8840; CHECK-NEXT: ret 8841entry: 8842 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 4) 8843 ret void 8844} 8845 8846 8847define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) { 8848; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: 8849; CHECK: # %bb.0: # %entry 8850; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma 8851; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 8852; CHECK-NEXT: ret 8853entry: 8854 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, i64 4) 8855 ret void 8856} 8857 8858define void @test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) { 8859; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16: 8860; CHECK: # %bb.0: # %entry 8861; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma 8862; CHECK-NEXT: vsoxseg2ei16.v v8, 
(a0), v16, v0.t 8863; CHECK-NEXT: ret 8864entry: 8865 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 4) 8866 ret void 8867} 8868 8869 8870define void @test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) { 8871; CHECK-LABEL: test_vsoxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: 8872; CHECK: # %bb.0: # %entry 8873; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma 8874; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 8875; CHECK-NEXT: ret 8876entry: 8877 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, i64 4) 8878 ret void 8879} 8880 8881define void @test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) { 8882; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32: 8883; CHECK: # %bb.0: # %entry 8884; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma 8885; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t 8886; CHECK-NEXT: ret 8887entry: 8888 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 4) 8889 ret void 8890} 8891 8892 8893define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 8894; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: 8895; CHECK: # %bb.0: # %entry 8896; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 8897; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 8898; CHECK-NEXT: ret 8899entry: 8900 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 8901 ret void 8902} 8903 8904define void @test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8905; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8: 8906; CHECK: # %bb.0: # %entry 8907; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 8908; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t 8909; CHECK-NEXT: ret 8910entry: 8911 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 8912 ret void 8913} 8914 8915 8916define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 8917; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: 8918; CHECK: # %bb.0: # %entry 8919; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 8920; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 8921; CHECK-NEXT: ret 8922entry: 8923 tail call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 8924 ret void 8925} 8926 8927define void @test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8928; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16: 8929; CHECK: # %bb.0: # %entry 8930; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 8931; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t 8932; CHECK-NEXT: ret 8933entry: 8934 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 8935 ret void 8936} 8937 8938 8939define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 8940; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: 8941; CHECK: # %bb.0: # %entry 8942; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 8943; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 8944; CHECK-NEXT: ret 8945entry: 8946 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 8947 ret void 8948} 8949 8950define void @test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8951; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32: 8952; CHECK: # %bb.0: # %entry 8953; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 8954; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t 8955; CHECK-NEXT: ret 8956entry: 8957 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 8958 ret void 8959} 8960 8961 8962define void @test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 8963; CHECK-LABEL: test_vsoxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: 8964; CHECK: # %bb.0: # %entry 8965; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 8966; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11 8967; CHECK-NEXT: ret 8968entry: 8969 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 8970 ret void 8971} 8972 8973define void @test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 8974; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: 8975; CHECK: # %bb.0: # %entry 8976; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 8977; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11, v0.t 8978; CHECK-NEXT: ret 8979entry: 8980 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr 
%base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 8981 ret void 8982} 8983 8984 8985define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 8986; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: 8987; CHECK: # %bb.0: # %entry 8988; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 8989; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 8990; CHECK-NEXT: ret 8991entry: 8992 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 8993 ret void 8994} 8995 8996define void @test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 8997; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: 8998; CHECK: # %bb.0: # %entry 8999; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9000; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t 9001; CHECK-NEXT: ret 9002entry: 9003 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 9004 ret void 9005} 9006 9007 9008define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 9009; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: 9010; CHECK: # %bb.0: # %entry 9011; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9012; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 9013; CHECK-NEXT: ret 9014entry: 9015 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 9016 ret void 9017} 9018 9019define void @test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 9020; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: 9021; CHECK: # %bb.0: # %entry 9022; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9023; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t 9024; CHECK-NEXT: ret 9025entry: 9026 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 9027 ret void 9028} 9029 9030 9031define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 9032; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: 9033; CHECK: # %bb.0: # %entry 9034; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9035; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 9036; CHECK-NEXT: ret 9037entry: 9038 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 9039 ret void 9040} 9041 9042define void 
@test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 9043; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: 9044; CHECK: # %bb.0: # %entry 9045; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9046; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t 9047; CHECK-NEXT: ret 9048entry: 9049 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 9050 ret void 9051} 9052 9053 9054define void @test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 9055; CHECK-LABEL: test_vsoxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: 9056; CHECK: # %bb.0: # %entry 9057; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9058; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 9059; CHECK-NEXT: ret 9060entry: 9061 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 9062 ret void 9063} 9064 9065define void @test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 9066; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: 9067; CHECK: # %bb.0: # %entry 9068; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9069; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t 9070; CHECK-NEXT: ret 9071entry: 9072 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 9073 ret void 9074} 9075 9076 9077define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 9078; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: 9079; CHECK: # %bb.0: # %entry 9080; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9081; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 9082; CHECK-NEXT: ret 9083entry: 9084 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 9085 ret void 9086} 9087 9088define void @test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 9089; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: 9090; CHECK: # %bb.0: # %entry 9091; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9092; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t 9093; CHECK-NEXT: ret 9094entry: 9095 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 9096 ret void 9097} 9098 9099 9100define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 
3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 9101; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: 9102; CHECK: # %bb.0: # %entry 9103; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9104; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 9105; CHECK-NEXT: ret 9106entry: 9107 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 9108 ret void 9109} 9110 9111define void @test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 9112; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: 9113; CHECK: # %bb.0: # %entry 9114; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9115; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t 9116; CHECK-NEXT: ret 9117entry: 9118 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 9119 ret void 9120} 9121 9122 9123define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 9124; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: 9125; CHECK: # %bb.0: # %entry 9126; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9127; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 9128; CHECK-NEXT: ret 9129entry: 9130 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 9131 ret void 9132} 9133 9134define void @test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 9135; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: 9136; CHECK: # %bb.0: # %entry 9137; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9138; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t 9139; CHECK-NEXT: ret 9140entry: 9141 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 9142 ret void 9143} 9144 9145 9146define void @test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 9147; CHECK-LABEL: test_vsoxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: 9148; CHECK: # %bb.0: # %entry 9149; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9150; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 9151; CHECK-NEXT: ret 9152entry: 9153 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 9154 ret void 9155} 9156 9157define void @test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 9158; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: 9159; 
CHECK: # %bb.0: # %entry 9160; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9161; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t 9162; CHECK-NEXT: ret 9163entry: 9164 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 9165 ret void 9166} 9167 9168 9169define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) { 9170; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: 9171; CHECK: # %bb.0: # %entry 9172; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9173; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 9174; CHECK-NEXT: ret 9175entry: 9176 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 4) 9177 ret void 9178} 9179 9180define void @test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) { 9181; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: 9182; CHECK: # %bb.0: # %entry 9183; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9184; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t 9185; CHECK-NEXT: ret 9186entry: 9187 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 9188 ret void 9189} 9190 9191 9192define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) { 9193; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: 9194; CHECK: # %bb.0: # %entry 9195; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9196; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 9197; CHECK-NEXT: ret 9198entry: 9199 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 4) 9200 ret void 9201} 9202 9203define void @test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) { 9204; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: 9205; CHECK: # %bb.0: # %entry 9206; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9207; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t 9208; CHECK-NEXT: ret 9209entry: 9210 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 9211 ret void 9212} 9213 9214 9215define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) { 9216; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: 9217; CHECK: # %bb.0: # %entry 9218; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9219; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 
9220; CHECK-NEXT: ret 9221entry: 9222 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 4) 9223 ret void 9224} 9225 9226define void @test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 9227; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: 9228; CHECK: # %bb.0: # %entry 9229; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9230; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t 9231; CHECK-NEXT: ret 9232entry: 9233 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 9234 ret void 9235} 9236 9237 9238define void @test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 9239; CHECK-LABEL: test_vsoxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: 9240; CHECK: # %bb.0: # %entry 9241; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9242; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 9243; CHECK-NEXT: ret 9244entry: 9245 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 4) 9246 ret void 9247} 9248 9249define void @test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 9250; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: 9251; CHECK: # %bb.0: # %entry 9252; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9253; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t 9254; CHECK-NEXT: ret 9255entry: 9256 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 9257 ret void 9258} 9259 9260 9261define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 9262; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: 9263; CHECK: # %bb.0: # %entry 9264; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9265; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 9266; CHECK-NEXT: ret 9267entry: 9268 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 9269 ret void 9270} 9271 9272define void @test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 9273; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: 9274; CHECK: # %bb.0: # %entry 9275; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9276; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 9277; CHECK-NEXT: ret 9278entry: 9279 tail call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 9280 ret void 9281} 9282 9283 9284define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 9285; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: 9286; CHECK: # %bb.0: # %entry 9287; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9288; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 9289; CHECK-NEXT: ret 9290entry: 9291 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 9292 ret void 9293} 9294 9295define void @test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 9296; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: 9297; CHECK: # %bb.0: # %entry 9298; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9299; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 9300; CHECK-NEXT: ret 9301entry: 9302 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 9303 ret void 9304} 9305 9306 9307define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 9308; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: 9309; CHECK: # %bb.0: # %entry 9310; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9311; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 9312; CHECK-NEXT: ret 9313entry: 9314 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 9315 ret void 9316} 9317 9318define void @test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 9319; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: 9320; CHECK: # %bb.0: # %entry 9321; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9322; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 9323; CHECK-NEXT: ret 9324entry: 9325 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 9326 ret void 9327} 9328 9329 9330define void @test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 9331; CHECK-LABEL: test_vsoxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: 9332; CHECK: # %bb.0: # %entry 9333; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9334; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 9335; CHECK-NEXT: ret 9336entry: 9337 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 
x i64> %index, i64 %vl, i64 4) 9338 ret void 9339} 9340 9341define void @test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 9342; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: 9343; CHECK: # %bb.0: # %entry 9344; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9345; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 9346; CHECK-NEXT: ret 9347entry: 9348 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 9349 ret void 9350} 9351 9352 9353define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 9354; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: 9355; CHECK: # %bb.0: # %entry 9356; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9357; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 9358; CHECK-NEXT: ret 9359entry: 9360 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 9361 ret void 9362} 9363 9364define void @test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 9365; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: 9366; CHECK: # %bb.0: # %entry 9367; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9368; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 9369; CHECK-NEXT: ret 9370entry: 9371 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 9372 ret void 9373} 9374 9375 9376define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 9377; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: 9378; CHECK: # %bb.0: # %entry 9379; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9380; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 9381; CHECK-NEXT: ret 9382entry: 9383 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 9384 ret void 9385} 9386 9387define void @test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 9388; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: 9389; CHECK: # %bb.0: # %entry 9390; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9391; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 9392; CHECK-NEXT: ret 9393entry: 9394 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 9395 ret void 9396} 9397 9398 9399define void 
@test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 9400; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: 9401; CHECK: # %bb.0: # %entry 9402; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9403; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 9404; CHECK-NEXT: ret 9405entry: 9406 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 9407 ret void 9408} 9409 9410define void @test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 9411; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: 9412; CHECK: # %bb.0: # %entry 9413; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9414; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 9415; CHECK-NEXT: ret 9416entry: 9417 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 9418 ret void 9419} 9420 9421 9422define void @test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 9423; CHECK-LABEL: test_vsoxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: 9424; CHECK: # %bb.0: # %entry 9425; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9426; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 9427; CHECK-NEXT: ret 9428entry: 9429 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 9430 ret void 9431} 9432 9433define void @test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 9434; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: 9435; CHECK: # %bb.0: # %entry 9436; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9437; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 9438; CHECK-NEXT: ret 9439entry: 9440 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 9441 ret void 9442} 9443 9444 9445define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 9446; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: 9447; CHECK: # %bb.0: # %entry 9448; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9449; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 9450; CHECK-NEXT: ret 9451entry: 9452 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 9453 ret void 9454} 9455 9456define void @test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> 
%mask) { 9457; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: 9458; CHECK: # %bb.0: # %entry 9459; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9460; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 9461; CHECK-NEXT: ret 9462entry: 9463 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 9464 ret void 9465} 9466 9467 9468define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 9469; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: 9470; CHECK: # %bb.0: # %entry 9471; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9472; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 9473; CHECK-NEXT: ret 9474entry: 9475 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 9476 ret void 9477} 9478 9479define void @test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 9480; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: 9481; CHECK: # %bb.0: # %entry 9482; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9483; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 9484; CHECK-NEXT: ret 9485entry: 9486 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 9487 ret void 9488} 9489 9490 9491define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 9492; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: 9493; CHECK: # %bb.0: # %entry 9494; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9495; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 9496; CHECK-NEXT: ret 9497entry: 9498 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 9499 ret void 9500} 9501 9502define void @test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 9503; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: 9504; CHECK: # %bb.0: # %entry 9505; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9506; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 9507; CHECK-NEXT: ret 9508entry: 9509 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 9510 ret void 9511} 9512 9513 9514define void @test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 9515; CHECK-LABEL: test_vsoxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: 9516; CHECK: # %bb.0: # %entry 9517; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9518; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 9519; CHECK-NEXT: ret 9520entry: 9521 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 9522 ret void 9523} 9524 9525define void @test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 9526; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: 9527; CHECK: # %bb.0: # %entry 9528; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9529; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 9530; CHECK-NEXT: ret 9531entry: 9532 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 9533 ret void 9534} 9535 9536 9537define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) { 9538; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: 9539; CHECK: # %bb.0: # %entry 9540; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9541; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 9542; CHECK-NEXT: ret 9543entry: 9544 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 4) 9545 ret void 9546} 9547 9548define void @test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) { 9549; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: 9550; CHECK: # %bb.0: # %entry 9551; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9552; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t 9553; CHECK-NEXT: ret 9554entry: 9555 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 9556 ret void 9557} 9558 9559 9560define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) { 9561; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: 9562; CHECK: # %bb.0: # %entry 9563; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9564; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 9565; CHECK-NEXT: ret 9566entry: 9567 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 4) 9568 ret void 9569} 9570 9571define void @test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) { 9572; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: 9573; CHECK: # %bb.0: # %entry 9574; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9575; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t 9576; CHECK-NEXT: ret 9577entry: 9578 tail 
call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 9579 ret void 9580} 9581 9582 9583define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) { 9584; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: 9585; CHECK: # %bb.0: # %entry 9586; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9587; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 9588; CHECK-NEXT: ret 9589entry: 9590 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 4) 9591 ret void 9592} 9593 9594define void @test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 9595; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: 9596; CHECK: # %bb.0: # %entry 9597; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9598; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t 9599; CHECK-NEXT: ret 9600entry: 9601 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 9602 ret void 9603} 9604 9605 9606define void @test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 9607; CHECK-LABEL: test_vsoxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: 9608; CHECK: # %bb.0: # %entry 9609; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9610; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 9611; CHECK-NEXT: ret 9612entry: 9613 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 4) 9614 ret void 9615} 9616 9617define void @test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 9618; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: 9619; CHECK: # %bb.0: # %entry 9620; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 9621; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t 9622; CHECK-NEXT: ret 9623entry: 9624 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 9625 ret void 9626} 9627 9628 9629define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 9630; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: 9631; CHECK: # %bb.0: # %entry 9632; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9633; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 9634; CHECK-NEXT: ret 9635entry: 9636 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) 
%val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 9637 ret void 9638} 9639 9640define void @test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 9641; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: 9642; CHECK: # %bb.0: # %entry 9643; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9644; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 9645; CHECK-NEXT: ret 9646entry: 9647 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 9648 ret void 9649} 9650 9651 9652define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 9653; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: 9654; CHECK: # %bb.0: # %entry 9655; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9656; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 9657; CHECK-NEXT: ret 9658entry: 9659 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 9660 ret void 9661} 9662 9663define void @test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 9664; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: 9665; CHECK: # %bb.0: # %entry 9666; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9667; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 9668; CHECK-NEXT: ret 9669entry: 9670 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 9671 ret void 9672} 9673 9674 9675define void @test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 9676; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: 9677; CHECK: # %bb.0: # %entry 9678; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9679; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 9680; CHECK-NEXT: ret 9681entry: 9682 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 9683 ret void 9684} 9685 9686define void @test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 9687; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: 9688; CHECK: # %bb.0: # %entry 9689; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9690; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t 9691; CHECK-NEXT: ret 9692entry: 9693 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 9694 ret void 9695} 9696 9697 9698define void 
@test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 9699; CHECK-LABEL: test_vsoxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: 9700; CHECK: # %bb.0: # %entry 9701; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9702; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13 9703; CHECK-NEXT: ret 9704entry: 9705 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 9706 ret void 9707} 9708 9709define void @test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 9710; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: 9711; CHECK: # %bb.0: # %entry 9712; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9713; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13, v0.t 9714; CHECK-NEXT: ret 9715entry: 9716 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 9717 ret void 9718} 9719 9720 9721define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 9722; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: 9723; CHECK: # %bb.0: # %entry 9724; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9725; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 9726; CHECK-NEXT: ret 9727entry: 9728 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 9729 ret void 9730} 9731 9732define void @test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 9733; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: 9734; CHECK: # %bb.0: # %entry 9735; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9736; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 9737; CHECK-NEXT: ret 9738entry: 9739 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 9740 ret void 9741} 9742 9743 9744define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 9745; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: 9746; CHECK: # %bb.0: # %entry 9747; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9748; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 9749; CHECK-NEXT: ret 9750entry: 9751 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 9752 ret void 9753} 9754 9755define void @test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> 
%mask) { 9756; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: 9757; CHECK: # %bb.0: # %entry 9758; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9759; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 9760; CHECK-NEXT: ret 9761entry: 9762 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 9763 ret void 9764} 9765 9766 9767define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 9768; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: 9769; CHECK: # %bb.0: # %entry 9770; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9771; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 9772; CHECK-NEXT: ret 9773entry: 9774 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 9775 ret void 9776} 9777 9778define void @test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 9779; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: 9780; CHECK: # %bb.0: # %entry 9781; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9782; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t 9783; CHECK-NEXT: ret 9784entry: 9785 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 9786 ret void 9787} 9788 9789 9790define void @test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 9791; CHECK-LABEL: test_vsoxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: 9792; CHECK: # %bb.0: # %entry 9793; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9794; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14 9795; CHECK-NEXT: ret 9796entry: 9797 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 9798 ret void 9799} 9800 9801define void @test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 9802; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: 9803; CHECK: # %bb.0: # %entry 9804; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 9805; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14, v0.t 9806; CHECK-NEXT: ret 9807entry: 9808 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 9809 ret void 9810} 9811 9812 9813define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 9814; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: 9815; CHECK: # %bb.0: # %entry 9816; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9817; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 9818; CHECK-NEXT: ret 9819entry: 9820 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 9821 ret void 9822} 9823 9824define void @test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 9825; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: 9826; CHECK: # %bb.0: # %entry 9827; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9828; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 9829; CHECK-NEXT: ret 9830entry: 9831 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 9832 ret void 9833} 9834 9835 9836define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 9837; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: 9838; CHECK: # %bb.0: # %entry 9839; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9840; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 9841; CHECK-NEXT: ret 9842entry: 9843 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 9844 ret void 9845} 9846 9847define void @test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 9848; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: 9849; CHECK: # %bb.0: # %entry 9850; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9851; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 9852; CHECK-NEXT: ret 9853entry: 9854 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 9855 ret void 9856} 9857 9858 9859define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 9860; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: 9861; CHECK: # %bb.0: # %entry 9862; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9863; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14 9864; CHECK-NEXT: ret 9865entry: 9866 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 9867 ret void 9868} 9869 9870define void @test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 9871; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: 9872; CHECK: # %bb.0: # %entry 9873; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9874; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14, v0.t 9875; CHECK-NEXT: ret 9876entry: 9877 tail call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 9878 ret void 9879} 9880 9881 9882define void @test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 9883; CHECK-LABEL: test_vsoxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: 9884; CHECK: # %bb.0: # %entry 9885; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9886; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 9887; CHECK-NEXT: ret 9888entry: 9889 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 9890 ret void 9891} 9892 9893define void @test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 9894; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: 9895; CHECK: # %bb.0: # %entry 9896; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 9897; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t 9898; CHECK-NEXT: ret 9899entry: 9900 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 9901 ret void 9902} 9903 9904 9905define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 9906; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: 9907; CHECK: # %bb.0: # %entry 9908; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9909; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 9910; CHECK-NEXT: ret 9911entry: 9912 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 9913 ret void 9914} 9915 9916define void @test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 9917; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: 9918; CHECK: # %bb.0: # %entry 9919; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9920; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 9921; CHECK-NEXT: ret 9922entry: 9923 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 9924 ret void 9925} 9926 9927 9928define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 9929; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: 9930; CHECK: # %bb.0: # %entry 9931; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9932; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 9933; CHECK-NEXT: ret 9934entry: 9935 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> 
%index, i64 %vl, i64 4) 9936 ret void 9937} 9938 9939define void @test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 9940; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: 9941; CHECK: # %bb.0: # %entry 9942; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9943; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 9944; CHECK-NEXT: ret 9945entry: 9946 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 9947 ret void 9948} 9949 9950 9951define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 9952; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: 9953; CHECK: # %bb.0: # %entry 9954; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9955; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 9956; CHECK-NEXT: ret 9957entry: 9958 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 9959 ret void 9960} 9961 9962define void @test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 9963; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: 9964; CHECK: # %bb.0: # %entry 9965; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9966; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 9967; CHECK-NEXT: ret 9968entry: 9969 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 9970 ret void 9971} 9972 9973 9974define void @test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 9975; CHECK-LABEL: test_vsoxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: 9976; CHECK: # %bb.0: # %entry 9977; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9978; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 9979; CHECK-NEXT: ret 9980entry: 9981 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 9982 ret void 9983} 9984 9985define void @test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 9986; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: 9987; CHECK: # %bb.0: # %entry 9988; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 9989; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t 9990; CHECK-NEXT: ret 9991entry: 9992 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 9993 ret void 9994} 9995 9996 9997define void 
@test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 9998; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: 9999; CHECK: # %bb.0: # %entry 10000; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10001; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 10002; CHECK-NEXT: ret 10003entry: 10004 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 10005 ret void 10006} 10007 10008define void @test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 10009; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: 10010; CHECK: # %bb.0: # %entry 10011; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10012; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 10013; CHECK-NEXT: ret 10014entry: 10015 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 10016 ret void 10017} 10018 10019 10020define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 10021; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: 10022; CHECK: # %bb.0: # %entry 10023; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10024; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 10025; CHECK-NEXT: ret 10026entry: 10027 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 10028 ret void 10029} 10030 10031define void @test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 10032; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: 10033; CHECK: # %bb.0: # %entry 10034; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10035; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 10036; CHECK-NEXT: ret 10037entry: 10038 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 10039 ret void 10040} 10041 10042 10043define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 10044; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: 10045; CHECK: # %bb.0: # %entry 10046; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10047; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 10048; CHECK-NEXT: ret 10049entry: 10050 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 10051 ret void 10052} 10053 10054define void @test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, 
<vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 10055; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: 10056; CHECK: # %bb.0: # %entry 10057; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10058; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 10059; CHECK-NEXT: ret 10060entry: 10061 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 10062 ret void 10063} 10064 10065 10066define void @test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 10067; CHECK-LABEL: test_vsoxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: 10068; CHECK: # %bb.0: # %entry 10069; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10070; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 10071; CHECK-NEXT: ret 10072entry: 10073 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 10074 ret void 10075} 10076 10077define void @test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 10078; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: 10079; CHECK: # %bb.0: # %entry 10080; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10081; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t 10082; CHECK-NEXT: ret 10083entry: 10084 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 10085 ret void 10086} 10087 10088 10089define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 10090; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: 10091; CHECK: # %bb.0: # %entry 10092; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10093; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 10094; CHECK-NEXT: ret 10095entry: 10096 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 10097 ret void 10098} 10099 10100define void @test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 10101; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: 10102; CHECK: # %bb.0: # %entry 10103; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10104; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 10105; CHECK-NEXT: ret 10106entry: 10107 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 10108 ret void 10109} 10110 10111 10112define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 10113; 
CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: 10114; CHECK: # %bb.0: # %entry 10115; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10116; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 10117; CHECK-NEXT: ret 10118entry: 10119 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 10120 ret void 10121} 10122 10123define void @test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 10124; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: 10125; CHECK: # %bb.0: # %entry 10126; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10127; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 10128; CHECK-NEXT: ret 10129entry: 10130 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 10131 ret void 10132} 10133 10134 10135define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 10136; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: 10137; CHECK: # %bb.0: # %entry 10138; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10139; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 10140; CHECK-NEXT: ret 10141entry: 10142 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 10143 ret void 10144} 10145 10146define void @test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 10147; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: 10148; CHECK: # %bb.0: # %entry 10149; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10150; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 10151; CHECK-NEXT: ret 10152entry: 10153 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 10154 ret void 10155} 10156 10157 10158define void @test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 10159; CHECK-LABEL: test_vsoxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: 10160; CHECK: # %bb.0: # %entry 10161; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10162; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16 10163; CHECK-NEXT: ret 10164entry: 10165 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 10166 ret void 10167} 10168 10169define void @test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 10170; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: 10171; CHECK: # 
%bb.0: # %entry 10172; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10173; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16, v0.t 10174; CHECK-NEXT: ret 10175entry: 10176 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 10177 ret void 10178} 10179 10180 10181define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 10182; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: 10183; CHECK: # %bb.0: # %entry 10184; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10185; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 10186; CHECK-NEXT: ret 10187entry: 10188 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 10189 ret void 10190} 10191 10192define void @test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 10193; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: 10194; CHECK: # %bb.0: # %entry 10195; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10196; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 10197; CHECK-NEXT: ret 10198entry: 10199 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 10200 ret void 10201} 10202 10203 10204define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 10205; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: 10206; CHECK: # %bb.0: # %entry 10207; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10208; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 10209; CHECK-NEXT: ret 10210entry: 10211 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 10212 ret void 10213} 10214 10215define void @test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 10216; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: 10217; CHECK: # %bb.0: # %entry 10218; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10219; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t 10220; CHECK-NEXT: ret 10221entry: 10222 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 10223 ret void 10224} 10225 10226 10227define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 10228; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: 10229; CHECK: # %bb.0: # %entry 10230; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10231; 
CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 10232; CHECK-NEXT: ret 10233entry: 10234 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 10235 ret void 10236} 10237 10238define void @test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 10239; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: 10240; CHECK: # %bb.0: # %entry 10241; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10242; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t 10243; CHECK-NEXT: ret 10244entry: 10245 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 10246 ret void 10247} 10248 10249 10250define void @test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 10251; CHECK-LABEL: test_vsoxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: 10252; CHECK: # %bb.0: # %entry 10253; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10254; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15 10255; CHECK-NEXT: ret 10256entry: 10257 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 10258 ret void 10259} 10260 10261define void @test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 10262; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: 10263; CHECK: # %bb.0: # %entry 10264; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10265; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15, v0.t 10266; CHECK-NEXT: ret 10267entry: 10268 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 10269 ret void 10270} 10271 10272 10273define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 10274; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: 10275; CHECK: # %bb.0: # %entry 10276; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10277; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 10278; CHECK-NEXT: ret 10279entry: 10280 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 10281 ret void 10282} 10283 10284define void @test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 10285; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: 10286; CHECK: # %bb.0: # %entry 10287; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10288; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 10289; CHECK-NEXT: ret 10290entry: 10291 tail 
call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 10292 ret void 10293} 10294 10295 10296define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 10297; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: 10298; CHECK: # %bb.0: # %entry 10299; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10300; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 10301; CHECK-NEXT: ret 10302entry: 10303 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 10304 ret void 10305} 10306 10307define void @test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 10308; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: 10309; CHECK: # %bb.0: # %entry 10310; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10311; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t 10312; CHECK-NEXT: ret 10313entry: 10314 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 10315 ret void 10316} 10317 10318 10319define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 10320; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: 10321; CHECK: # %bb.0: # %entry 10322; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10323; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 10324; CHECK-NEXT: ret 10325entry: 10326 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 10327 ret void 10328} 10329 10330define void @test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 10331; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: 10332; CHECK: # %bb.0: # %entry 10333; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10334; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t 10335; CHECK-NEXT: ret 10336entry: 10337 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 10338 ret void 10339} 10340 10341 10342define void @test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 10343; CHECK-LABEL: test_vsoxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: 10344; CHECK: # %bb.0: # %entry 10345; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10346; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 10347; CHECK-NEXT: ret 10348entry: 10349 tail call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 10350 ret void 10351} 10352 10353define void @test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 10354; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: 10355; CHECK: # %bb.0: # %entry 10356; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10357; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t 10358; CHECK-NEXT: ret 10359entry: 10360 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 10361 ret void 10362} 10363 10364 10365define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 10366; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: 10367; CHECK: # %bb.0: # %entry 10368; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10369; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 10370; CHECK-NEXT: ret 10371entry: 10372 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 10373 ret void 10374} 10375 10376define void @test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 10377; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: 10378; CHECK: # %bb.0: # %entry 10379; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10380; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 10381; CHECK-NEXT: ret 10382entry: 10383 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 10384 ret void 10385} 10386 10387 10388define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 10389; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: 10390; CHECK: # %bb.0: # %entry 10391; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10392; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 10393; CHECK-NEXT: ret 10394entry: 10395 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 10396 ret void 10397} 10398 10399define void @test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 10400; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: 10401; CHECK: # %bb.0: # %entry 10402; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10403; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t 10404; CHECK-NEXT: ret 10405entry: 10406 tail call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 10407 ret void 10408} 10409 10410 10411define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 10412; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: 10413; CHECK: # %bb.0: # %entry 10414; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10415; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16 10416; CHECK-NEXT: ret 10417entry: 10418 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 10419 ret void 10420} 10421 10422define void @test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 10423; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: 10424; CHECK: # %bb.0: # %entry 10425; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10426; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16, v0.t 10427; CHECK-NEXT: ret 10428entry: 10429 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 10430 ret void 10431} 10432 10433 10434define void @test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 10435; CHECK-LABEL: test_vsoxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: 10436; CHECK: # %bb.0: # %entry 10437; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10438; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 10439; CHECK-NEXT: ret 10440entry: 10441 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 10442 ret void 10443} 10444 10445define void @test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 10446; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: 10447; CHECK: # %bb.0: # %entry 10448; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10449; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t 10450; CHECK-NEXT: ret 10451entry: 10452 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 10453 ret void 10454} 10455 10456 10457define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 10458; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: 10459; CHECK: # %bb.0: # %entry 10460; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10461; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 10462; CHECK-NEXT: ret 10463entry: 10464 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", 
<vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 10465 ret void 10466} 10467 10468define void @test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 10469; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: 10470; CHECK: # %bb.0: # %entry 10471; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10472; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 10473; CHECK-NEXT: ret 10474entry: 10475 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 10476 ret void 10477} 10478 10479 10480define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 10481; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: 10482; CHECK: # %bb.0: # %entry 10483; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10484; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 10485; CHECK-NEXT: ret 10486entry: 10487 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 10488 ret void 10489} 10490 10491define void @test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 10492; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: 10493; CHECK: # %bb.0: # %entry 10494; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10495; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 10496; CHECK-NEXT: ret 10497entry: 10498 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 10499 ret void 10500} 10501 10502 10503define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 10504; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: 10505; CHECK: # %bb.0: # %entry 10506; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10507; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 10508; CHECK-NEXT: ret 10509entry: 10510 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 10511 ret void 10512} 10513 10514define void @test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 10515; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: 10516; CHECK: # %bb.0: # %entry 10517; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10518; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 10519; CHECK-NEXT: ret 10520entry: 10521 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 
1 x i1> %mask, i64 %vl, i64 4) 10522 ret void 10523} 10524 10525 10526define void @test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 10527; CHECK-LABEL: test_vsoxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: 10528; CHECK: # %bb.0: # %entry 10529; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10530; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 10531; CHECK-NEXT: ret 10532entry: 10533 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 10534 ret void 10535} 10536 10537define void @test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 10538; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: 10539; CHECK: # %bb.0: # %entry 10540; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 10541; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 10542; CHECK-NEXT: ret 10543entry: 10544 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 10545 ret void 10546} 10547 10548 10549define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 10550; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: 10551; CHECK: # %bb.0: # %entry 10552; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10553; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 10554; CHECK-NEXT: ret 10555entry: 10556 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 10557 ret void 10558} 10559 10560define void @test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 10561; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: 10562; CHECK: # %bb.0: # %entry 10563; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10564; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 10565; CHECK-NEXT: ret 10566entry: 10567 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 10568 ret void 10569} 10570 10571 10572define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 10573; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: 10574; CHECK: # %bb.0: # %entry 10575; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10576; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 10577; CHECK-NEXT: ret 10578entry: 10579 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 10580 ret void 10581} 10582 10583define void 
@test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 10584; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: 10585; CHECK: # %bb.0: # %entry 10586; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10587; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 10588; CHECK-NEXT: ret 10589entry: 10590 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 10591 ret void 10592} 10593 10594 10595define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 10596; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: 10597; CHECK: # %bb.0: # %entry 10598; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10599; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 10600; CHECK-NEXT: ret 10601entry: 10602 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 10603 ret void 10604} 10605 10606define void @test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 10607; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: 10608; CHECK: # %bb.0: # %entry 10609; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10610; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 10611; CHECK-NEXT: ret 10612entry: 10613 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 10614 ret void 10615} 10616 10617 10618define void @test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 10619; CHECK-LABEL: test_vsoxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: 10620; CHECK: # %bb.0: # %entry 10621; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10622; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 10623; CHECK-NEXT: ret 10624entry: 10625 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 10626 ret void 10627} 10628 10629define void @test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 10630; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: 10631; CHECK: # %bb.0: # %entry 10632; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 10633; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 10634; CHECK-NEXT: ret 10635entry: 10636 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 10637 ret void 10638} 10639 10640 10641define void 
@test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 10642; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: 10643; CHECK: # %bb.0: # %entry 10644; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10645; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 10646; CHECK-NEXT: ret 10647entry: 10648 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 10649 ret void 10650} 10651 10652define void @test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 10653; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: 10654; CHECK: # %bb.0: # %entry 10655; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10656; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 10657; CHECK-NEXT: ret 10658entry: 10659 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 10660 ret void 10661} 10662 10663 10664define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 10665; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: 10666; CHECK: # %bb.0: # %entry 10667; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10668; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 10669; CHECK-NEXT: ret 10670entry: 10671 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 10672 ret void 10673} 10674 10675define void @test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 10676; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: 10677; CHECK: # %bb.0: # %entry 10678; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10679; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 10680; CHECK-NEXT: ret 10681entry: 10682 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 10683 ret void 10684} 10685 10686 10687define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 10688; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: 10689; CHECK: # %bb.0: # %entry 10690; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10691; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 10692; CHECK-NEXT: ret 10693entry: 10694 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 10695 ret void 10696} 10697 10698define void @test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, 
<vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 10699; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: 10700; CHECK: # %bb.0: # %entry 10701; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10702; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 10703; CHECK-NEXT: ret 10704entry: 10705 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 10706 ret void 10707} 10708 10709 10710define void @test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 10711; CHECK-LABEL: test_vsoxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: 10712; CHECK: # %bb.0: # %entry 10713; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10714; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 10715; CHECK-NEXT: ret 10716entry: 10717 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 10718 ret void 10719} 10720 10721define void @test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 10722; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: 10723; CHECK: # %bb.0: # %entry 10724; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 10725; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 10726; CHECK-NEXT: ret 10727entry: 10728 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 10729 ret void 10730} 10731 10732 10733define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 10734; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: 10735; CHECK: # %bb.0: # %entry 10736; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 10737; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 10738; CHECK-NEXT: ret 10739entry: 10740 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 5) 10741 ret void 10742} 10743 10744define void @test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 10745; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8: 10746; CHECK: # %bb.0: # %entry 10747; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 10748; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t 10749; CHECK-NEXT: ret 10750entry: 10751 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 10752 ret void 10753} 10754 10755 10756define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 10757; CHECK-LABEL: 
test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 5)
  ret void
}

define void @test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 10936 ret void 10937} 10938 10939 10940define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 10941; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: 10942; CHECK: # %bb.0: # %entry 10943; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 10944; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 10945; CHECK-NEXT: ret 10946entry: 10947 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 5) 10948 ret void 10949} 10950 10951define void @test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 10952; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16: 10953; CHECK: # %bb.0: # %entry 10954; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 10955; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t 10956; CHECK-NEXT: ret 10957entry: 10958 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 10959 ret void 10960} 10961 10962 10963define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 10964; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: 10965; CHECK: # %bb.0: # %entry 10966; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 10967; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 10968; CHECK-NEXT: ret 10969entry: 10970 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 5) 10971 ret void 10972} 10973 10974define void @test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 10975; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32: 10976; CHECK: # %bb.0: # %entry 10977; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 10978; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t 10979; CHECK-NEXT: ret 10980entry: 10981 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 10982 ret void 10983} 10984 10985 10986define void @test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 10987; CHECK-LABEL: test_vsoxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: 10988; CHECK: # %bb.0: # %entry 10989; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 10990; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 10991; CHECK-NEXT: ret 10992entry: 10993 tail call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 5) 10994 ret void 10995} 10996 10997define void @test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 10998; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i64: 10999; CHECK: # %bb.0: # %entry 11000; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11001; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t 11002; CHECK-NEXT: ret 11003entry: 11004 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 11005 ret void 11006} 11007 11008 11009define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) { 11010; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: 11011; CHECK: # %bb.0: # %entry 11012; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 11013; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 11014; CHECK-NEXT: ret 11015entry: 11016 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 5) 11017 ret void 11018} 11019 11020define void @test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) { 11021; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8: 11022; CHECK: # %bb.0: # %entry 11023; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 11024; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t 11025; CHECK-NEXT: ret 11026entry: 11027 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 5) 11028 ret void 11029} 11030 11031 11032define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) { 11033; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: 11034; CHECK: # %bb.0: # %entry 11035; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 11036; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 11037; CHECK-NEXT: ret 11038entry: 11039 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 5) 11040 ret void 11041} 11042 11043define void @test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) { 11044; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16: 11045; CHECK: # %bb.0: # %entry 11046; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 11047; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t 11048; CHECK-NEXT: ret 11049entry: 11050 tail call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 5) 11051 ret void 11052} 11053 11054 11055define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) { 11056; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: 11057; CHECK: # %bb.0: # %entry 11058; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 11059; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 11060; CHECK-NEXT: ret 11061entry: 11062 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 5) 11063 ret void 11064} 11065 11066define void @test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 11067; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32: 11068; CHECK: # %bb.0: # %entry 11069; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 11070; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t 11071; CHECK-NEXT: ret 11072entry: 11073 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 5) 11074 ret void 11075} 11076 11077 11078define void @test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 11079; CHECK-LABEL: test_vsoxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: 11080; CHECK: # %bb.0: # %entry 11081; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 11082; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 11083; CHECK-NEXT: ret 11084entry: 11085 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 5) 11086 ret void 11087} 11088 11089define void @test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 11090; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i64: 11091; CHECK: # %bb.0: # %entry 11092; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 11093; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t 11094; CHECK-NEXT: ret 11095entry: 11096 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 5) 11097 ret void 11098} 11099 11100 11101define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 11102; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: 11103; CHECK: # %bb.0: # %entry 11104; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11105; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 11106; CHECK-NEXT: ret 11107entry: 11108 tail call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 5) 11109 ret void 11110} 11111 11112define void @test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11113; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8: 11114; CHECK: # %bb.0: # %entry 11115; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11116; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t 11117; CHECK-NEXT: ret 11118entry: 11119 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 11120 ret void 11121} 11122 11123 11124define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 11125; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: 11126; CHECK: # %bb.0: # %entry 11127; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11128; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 11129; CHECK-NEXT: ret 11130entry: 11131 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5) 11132 ret void 11133} 11134 11135define void @test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11136; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16: 11137; CHECK: # %bb.0: # %entry 11138; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11139; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t 11140; CHECK-NEXT: ret 11141entry: 11142 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 11143 ret void 11144} 11145 11146 11147define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 11148; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: 11149; CHECK: # %bb.0: # %entry 11150; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11151; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 11152; CHECK-NEXT: ret 11153entry: 11154 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 5) 11155 ret void 11156} 11157 11158define void @test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11159; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32: 11160; CHECK: # %bb.0: # %entry 11161; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11162; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t 11163; CHECK-NEXT: ret 11164entry: 11165 tail call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 11166 ret void 11167} 11168 11169 11170define void @test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 11171; CHECK-LABEL: test_vsoxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: 11172; CHECK: # %bb.0: # %entry 11173; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11174; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11 11175; CHECK-NEXT: ret 11176entry: 11177 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 5) 11178 ret void 11179} 11180 11181define void @test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11182; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i64: 11183; CHECK: # %bb.0: # %entry 11184; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11185; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11, v0.t 11186; CHECK-NEXT: ret 11187entry: 11188 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 11189 ret void 11190} 11191 11192 11193define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 11194; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: 11195; CHECK: # %bb.0: # %entry 11196; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11197; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 11198; CHECK-NEXT: ret 11199entry: 11200 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 5) 11201 ret void 11202} 11203 11204define void @test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 11205; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8: 11206; CHECK: # %bb.0: # %entry 11207; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11208; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t 11209; CHECK-NEXT: ret 11210entry: 11211 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 11212 ret void 11213} 11214 11215 11216define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 11217; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: 11218; CHECK: # %bb.0: # %entry 11219; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11220; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 11221; CHECK-NEXT: ret 11222entry: 11223 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", 
<vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 5) 11224 ret void 11225} 11226 11227define void @test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 11228; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16: 11229; CHECK: # %bb.0: # %entry 11230; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11231; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t 11232; CHECK-NEXT: ret 11233entry: 11234 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 11235 ret void 11236} 11237 11238 11239define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 11240; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: 11241; CHECK: # %bb.0: # %entry 11242; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11243; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 11244; CHECK-NEXT: ret 11245entry: 11246 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 5) 11247 ret void 11248} 11249 11250define void @test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 11251; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32: 11252; CHECK: # %bb.0: # %entry 11253; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11254; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t 11255; CHECK-NEXT: ret 11256entry: 11257 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 11258 ret void 11259} 11260 11261 11262define void @test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 11263; CHECK-LABEL: test_vsoxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: 11264; CHECK: # %bb.0: # %entry 11265; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11266; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 11267; CHECK-NEXT: ret 11268entry: 11269 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 5) 11270 ret void 11271} 11272 11273define void @test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 11274; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i64: 11275; CHECK: # %bb.0: # %entry 11276; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11277; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t 11278; CHECK-NEXT: ret 11279entry: 11280 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, <vscale 
x 2 x i1> %mask, i64 %vl, i64 5) 11281 ret void 11282} 11283 11284 11285define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 11286; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: 11287; CHECK: # %bb.0: # %entry 11288; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11289; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 11290; CHECK-NEXT: ret 11291entry: 11292 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 5) 11293 ret void 11294} 11295 11296define void @test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 11297; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8: 11298; CHECK: # %bb.0: # %entry 11299; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11300; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t 11301; CHECK-NEXT: ret 11302entry: 11303 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 11304 ret void 11305} 11306 11307 11308define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 11309; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: 11310; CHECK: # %bb.0: # %entry 11311; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11312; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 11313; CHECK-NEXT: ret 11314entry: 11315 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 5) 11316 ret void 11317} 11318 11319define void @test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 11320; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16: 11321; CHECK: # %bb.0: # %entry 11322; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11323; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t 11324; CHECK-NEXT: ret 11325entry: 11326 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 11327 ret void 11328} 11329 11330 11331define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 11332; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: 11333; CHECK: # %bb.0: # %entry 11334; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11335; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14 11336; CHECK-NEXT: ret 11337entry: 11338 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 5) 11339 ret void 11340} 11341 11342define void 
@test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 11343; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32: 11344; CHECK: # %bb.0: # %entry 11345; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11346; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v14, v0.t 11347; CHECK-NEXT: ret 11348entry: 11349 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 11350 ret void 11351} 11352 11353 11354define void @test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 11355; CHECK-LABEL: test_vsoxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: 11356; CHECK: # %bb.0: # %entry 11357; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11358; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 11359; CHECK-NEXT: ret 11360entry: 11361 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 5) 11362 ret void 11363} 11364 11365define void @test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 11366; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i64: 11367; CHECK: # %bb.0: # %entry 11368; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11369; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t 11370; CHECK-NEXT: ret 11371entry: 11372 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 11373 ret void 11374} 11375 11376 11377define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 11378; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: 11379; CHECK: # %bb.0: # %entry 11380; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11381; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 11382; CHECK-NEXT: ret 11383entry: 11384 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 5) 11385 ret void 11386} 11387 11388define void @test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11389; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8: 11390; CHECK: # %bb.0: # %entry 11391; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11392; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 11393; CHECK-NEXT: ret 11394entry: 11395 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 11396 ret void 11397} 11398 11399 11400define void 
@test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 11401; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: 11402; CHECK: # %bb.0: # %entry 11403; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11404; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 11405; CHECK-NEXT: ret 11406entry: 11407 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5) 11408 ret void 11409} 11410 11411define void @test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11412; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16: 11413; CHECK: # %bb.0: # %entry 11414; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11415; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 11416; CHECK-NEXT: ret 11417entry: 11418 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 11419 ret void 11420} 11421 11422 11423define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 11424; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: 11425; CHECK: # %bb.0: # %entry 11426; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11427; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 11428; CHECK-NEXT: ret 11429entry: 11430 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 5) 11431 ret void 11432} 11433 11434define void @test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11435; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32: 11436; CHECK: # %bb.0: # %entry 11437; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11438; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 11439; CHECK-NEXT: ret 11440entry: 11441 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 11442 ret void 11443} 11444 11445 11446define void @test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 11447; CHECK-LABEL: test_vsoxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: 11448; CHECK: # %bb.0: # %entry 11449; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11450; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 11451; CHECK-NEXT: ret 11452entry: 11453 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 5) 11454 ret void 11455} 11456 11457define void @test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) 
%val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11458; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i64: 11459; CHECK: # %bb.0: # %entry 11460; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11461; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 11462; CHECK-NEXT: ret 11463entry: 11464 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 11465 ret void 11466} 11467 11468 11469define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 11470; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: 11471; CHECK: # %bb.0: # %entry 11472; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11473; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 11474; CHECK-NEXT: ret 11475entry: 11476 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 5) 11477 ret void 11478} 11479 11480define void @test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 11481; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8: 11482; CHECK: # %bb.0: # %entry 11483; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11484; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 11485; CHECK-NEXT: ret 11486entry: 11487 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 11488 ret void 11489} 11490 11491 11492define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 11493; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: 11494; CHECK: # %bb.0: # %entry 11495; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11496; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 11497; CHECK-NEXT: ret 11498entry: 11499 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 5) 11500 ret void 11501} 11502 11503define void @test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 11504; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16: 11505; CHECK: # %bb.0: # %entry 11506; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11507; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 11508; CHECK-NEXT: ret 11509entry: 11510 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 11511 ret void 11512} 11513 11514 11515define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 
11516; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: 11517; CHECK: # %bb.0: # %entry 11518; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11519; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 11520; CHECK-NEXT: ret 11521entry: 11522 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 5) 11523 ret void 11524} 11525 11526define void @test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 11527; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32: 11528; CHECK: # %bb.0: # %entry 11529; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11530; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 11531; CHECK-NEXT: ret 11532entry: 11533 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 11534 ret void 11535} 11536 11537 11538define void @test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 11539; CHECK-LABEL: test_vsoxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: 11540; CHECK: # %bb.0: # %entry 11541; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11542; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 11543; CHECK-NEXT: ret 11544entry: 11545 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 5) 11546 ret void 11547} 11548 11549define void @test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 11550; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i64: 11551; CHECK: # %bb.0: # %entry 11552; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11553; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 11554; CHECK-NEXT: ret 11555entry: 11556 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 11557 ret void 11558} 11559 11560 11561define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 11562; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: 11563; CHECK: # %bb.0: # %entry 11564; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11565; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 11566; CHECK-NEXT: ret 11567entry: 11568 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 5) 11569 ret void 11570} 11571 11572define void @test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 11573; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8: 11574; 
CHECK: # %bb.0: # %entry 11575; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11576; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t 11577; CHECK-NEXT: ret 11578entry: 11579 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 11580 ret void 11581} 11582 11583 11584define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 11585; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: 11586; CHECK: # %bb.0: # %entry 11587; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11588; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 11589; CHECK-NEXT: ret 11590entry: 11591 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 5) 11592 ret void 11593} 11594 11595define void @test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 11596; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16: 11597; CHECK: # %bb.0: # %entry 11598; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11599; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t 11600; CHECK-NEXT: ret 11601entry: 11602 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 11603 ret void 11604} 11605 11606 11607define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 11608; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: 11609; CHECK: # %bb.0: # %entry 11610; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11611; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 11612; CHECK-NEXT: ret 11613entry: 11614 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 5) 11615 ret void 11616} 11617 11618define void @test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 11619; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32: 11620; CHECK: # %bb.0: # %entry 11621; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11622; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t 11623; CHECK-NEXT: ret 11624entry: 11625 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 11626 ret void 11627} 11628 11629 11630define void @test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 11631; CHECK-LABEL: test_vsoxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: 11632; CHECK: # %bb.0: # %entry 11633; CHECK-NEXT: vsetvli 
zero, a1, e32, m2, ta, ma 11634; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 11635; CHECK-NEXT: ret 11636entry: 11637 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 5) 11638 ret void 11639} 11640 11641define void @test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 11642; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i64: 11643; CHECK: # %bb.0: # %entry 11644; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 11645; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t 11646; CHECK-NEXT: ret 11647entry: 11648 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 5) 11649 ret void 11650} 11651 11652 11653define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 11654; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: 11655; CHECK: # %bb.0: # %entry 11656; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11657; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 11658; CHECK-NEXT: ret 11659entry: 11660 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 5) 11661 ret void 11662} 11663 11664define void @test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11665; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8: 11666; CHECK: # %bb.0: # %entry 11667; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11668; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 11669; CHECK-NEXT: ret 11670entry: 11671 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 11672 ret void 11673} 11674 11675 11676define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 11677; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: 11678; CHECK: # %bb.0: # %entry 11679; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11680; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 11681; CHECK-NEXT: ret 11682entry: 11683 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5) 11684 ret void 11685} 11686 11687define void @test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11688; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16: 11689; CHECK: # %bb.0: # %entry 11690; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11691; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 11692; 
CHECK-NEXT: ret 11693entry: 11694 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 11695 ret void 11696} 11697 11698 11699define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 11700; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: 11701; CHECK: # %bb.0: # %entry 11702; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11703; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 11704; CHECK-NEXT: ret 11705entry: 11706 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 5) 11707 ret void 11708} 11709 11710define void @test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11711; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32: 11712; CHECK: # %bb.0: # %entry 11713; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11714; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t 11715; CHECK-NEXT: ret 11716entry: 11717 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 11718 ret void 11719} 11720 11721 11722define void @test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 11723; CHECK-LABEL: test_vsoxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: 11724; CHECK: # %bb.0: # %entry 11725; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11726; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13 11727; CHECK-NEXT: ret 11728entry: 11729 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 5) 11730 ret void 11731} 11732 11733define void @test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11734; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i64: 11735; CHECK: # %bb.0: # %entry 11736; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11737; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13, v0.t 11738; CHECK-NEXT: ret 11739entry: 11740 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 11741 ret void 11742} 11743 11744 11745define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 11746; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: 11747; CHECK: # %bb.0: # %entry 11748; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11749; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 11750; CHECK-NEXT: ret 11751entry: 11752 tail call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 5) 11753 ret void 11754} 11755 11756define void @test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 11757; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8: 11758; CHECK: # %bb.0: # %entry 11759; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11760; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 11761; CHECK-NEXT: ret 11762entry: 11763 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 11764 ret void 11765} 11766 11767 11768define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 11769; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: 11770; CHECK: # %bb.0: # %entry 11771; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11772; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 11773; CHECK-NEXT: ret 11774entry: 11775 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 5) 11776 ret void 11777} 11778 11779define void @test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 11780; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16: 11781; CHECK: # %bb.0: # %entry 11782; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11783; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 11784; CHECK-NEXT: ret 11785entry: 11786 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 11787 ret void 11788} 11789 11790 11791define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 11792; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: 11793; CHECK: # %bb.0: # %entry 11794; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11795; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 11796; CHECK-NEXT: ret 11797entry: 11798 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 5) 11799 ret void 11800} 11801 11802define void @test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 11803; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32: 11804; CHECK: # %bb.0: # %entry 11805; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11806; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t 11807; CHECK-NEXT: ret 11808entry: 11809 tail call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 11810 ret void 11811} 11812 11813 11814define void @test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 11815; CHECK-LABEL: test_vsoxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: 11816; CHECK: # %bb.0: # %entry 11817; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11818; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14 11819; CHECK-NEXT: ret 11820entry: 11821 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 5) 11822 ret void 11823} 11824 11825define void @test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 11826; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i64: 11827; CHECK: # %bb.0: # %entry 11828; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11829; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14, v0.t 11830; CHECK-NEXT: ret 11831entry: 11832 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 11833 ret void 11834} 11835 11836 11837define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 11838; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: 11839; CHECK: # %bb.0: # %entry 11840; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11841; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 11842; CHECK-NEXT: ret 11843entry: 11844 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 5) 11845 ret void 11846} 11847 11848define void @test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11849; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8: 11850; CHECK: # %bb.0: # %entry 11851; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11852; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 11853; CHECK-NEXT: ret 11854entry: 11855 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 11856 ret void 11857} 11858 11859 11860define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 11861; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: 11862; CHECK: # %bb.0: # %entry 11863; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11864; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 11865; CHECK-NEXT: ret 11866entry: 11867 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", 
<vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5) 11868 ret void 11869} 11870 11871define void @test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11872; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16: 11873; CHECK: # %bb.0: # %entry 11874; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11875; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 11876; CHECK-NEXT: ret 11877entry: 11878 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 11879 ret void 11880} 11881 11882 11883define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 11884; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: 11885; CHECK: # %bb.0: # %entry 11886; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11887; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 11888; CHECK-NEXT: ret 11889entry: 11890 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 5) 11891 ret void 11892} 11893 11894define void @test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11895; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32: 11896; CHECK: # %bb.0: # %entry 11897; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11898; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 11899; CHECK-NEXT: ret 11900entry: 11901 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 11902 ret void 11903} 11904 11905 11906define void @test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 11907; CHECK-LABEL: test_vsoxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: 11908; CHECK: # %bb.0: # %entry 11909; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11910; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 11911; CHECK-NEXT: ret 11912entry: 11913 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 5) 11914 ret void 11915} 11916 11917define void @test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 11918; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i64: 11919; CHECK: # %bb.0: # %entry 11920; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 11921; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t 11922; CHECK-NEXT: ret 11923entry: 11924 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, 
<vscale x 1 x i1> %mask, i64 %vl, i64 5) 11925 ret void 11926} 11927 11928 11929define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 11930; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: 11931; CHECK: # %bb.0: # %entry 11932; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11933; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 11934; CHECK-NEXT: ret 11935entry: 11936 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 5) 11937 ret void 11938} 11939 11940define void @test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 11941; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8: 11942; CHECK: # %bb.0: # %entry 11943; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11944; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 11945; CHECK-NEXT: ret 11946entry: 11947 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 11948 ret void 11949} 11950 11951 11952define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 11953; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: 11954; CHECK: # %bb.0: # %entry 11955; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11956; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 11957; CHECK-NEXT: ret 11958entry: 11959 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 5) 11960 ret void 11961} 11962 11963define void @test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 11964; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16: 11965; CHECK: # %bb.0: # %entry 11966; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11967; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 11968; CHECK-NEXT: ret 11969entry: 11970 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 11971 ret void 11972} 11973 11974 11975define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 11976; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: 11977; CHECK: # %bb.0: # %entry 11978; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11979; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 11980; CHECK-NEXT: ret 11981entry: 11982 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 5) 11983 ret void 11984} 11985 11986define void 
@test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 11987; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32: 11988; CHECK: # %bb.0: # %entry 11989; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 11990; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 11991; CHECK-NEXT: ret 11992entry: 11993 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 11994 ret void 11995} 11996 11997 11998define void @test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 11999; CHECK-LABEL: test_vsoxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: 12000; CHECK: # %bb.0: # %entry 12001; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12002; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 12003; CHECK-NEXT: ret 12004entry: 12005 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 5) 12006 ret void 12007} 12008 12009define void @test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 12010; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i64: 12011; CHECK: # %bb.0: # %entry 12012; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12013; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t 12014; CHECK-NEXT: ret 12015entry: 12016 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 12017 ret void 12018} 12019 12020 12021define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 12022; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: 12023; CHECK: # %bb.0: # %entry 12024; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 12025; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 12026; CHECK-NEXT: ret 12027entry: 12028 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 5) 12029 ret void 12030} 12031 12032define void @test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 12033; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8: 12034; CHECK: # %bb.0: # %entry 12035; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 12036; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 12037; CHECK-NEXT: ret 12038entry: 12039 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 12040 ret void 12041} 12042 12043 12044define void 
@test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 12045; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: 12046; CHECK: # %bb.0: # %entry 12047; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 12048; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 12049; CHECK-NEXT: ret 12050entry: 12051 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5) 12052 ret void 12053} 12054 12055define void @test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 12056; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16: 12057; CHECK: # %bb.0: # %entry 12058; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 12059; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t 12060; CHECK-NEXT: ret 12061entry: 12062 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 12063 ret void 12064} 12065 12066 12067define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 12068; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: 12069; CHECK: # %bb.0: # %entry 12070; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 12071; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 12072; CHECK-NEXT: ret 12073entry: 12074 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 5) 12075 ret void 12076} 12077 12078define void @test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 12079; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32: 12080; CHECK: # %bb.0: # %entry 12081; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 12082; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t 12083; CHECK-NEXT: ret 12084entry: 12085 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 12086 ret void 12087} 12088 12089 12090define void @test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 12091; CHECK-LABEL: test_vsoxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: 12092; CHECK: # %bb.0: # %entry 12093; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 12094; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15 12095; CHECK-NEXT: ret 12096entry: 12097 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 5) 12098 ret void 12099} 12100 12101define void @test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) 
%val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 12102; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i64: 12103; CHECK: # %bb.0: # %entry 12104; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 12105; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15, v0.t 12106; CHECK-NEXT: ret 12107entry: 12108 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 12109 ret void 12110} 12111 12112 12113define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 12114; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: 12115; CHECK: # %bb.0: # %entry 12116; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12117; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 12118; CHECK-NEXT: ret 12119entry: 12120 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 5) 12121 ret void 12122} 12123 12124define void @test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 12125; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8: 12126; CHECK: # %bb.0: # %entry 12127; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12128; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 12129; CHECK-NEXT: ret 12130entry: 12131 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 12132 ret void 12133} 12134 12135 12136define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 12137; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: 12138; CHECK: # %bb.0: # %entry 12139; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12140; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 12141; CHECK-NEXT: ret 12142entry: 12143 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 5) 12144 ret void 12145} 12146 12147define void @test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 12148; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16: 12149; CHECK: # %bb.0: # %entry 12150; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12151; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t 12152; CHECK-NEXT: ret 12153entry: 12154 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 12155 ret void 12156} 12157 12158 12159define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 
12160; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: 12161; CHECK: # %bb.0: # %entry 12162; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12163; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 12164; CHECK-NEXT: ret 12165entry: 12166 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 5) 12167 ret void 12168} 12169 12170define void @test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 12171; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32: 12172; CHECK: # %bb.0: # %entry 12173; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12174; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t 12175; CHECK-NEXT: ret 12176entry: 12177 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 12178 ret void 12179} 12180 12181 12182define void @test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 12183; CHECK-LABEL: test_vsoxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: 12184; CHECK: # %bb.0: # %entry 12185; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12186; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 12187; CHECK-NEXT: ret 12188entry: 12189 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 5) 12190 ret void 12191} 12192 12193define void @test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 12194; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i64: 12195; CHECK: # %bb.0: # %entry 12196; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12197; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t 12198; CHECK-NEXT: ret 12199entry: 12200 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 12201 ret void 12202} 12203 12204 12205define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 12206; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: 12207; CHECK: # %bb.0: # %entry 12208; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 12209; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 12210; CHECK-NEXT: ret 12211entry: 12212 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 5) 12213 ret void 12214} 12215 12216define void @test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 12217; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8: 12218; CHECK: # 
%bb.0: # %entry 12219; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 12220; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 12221; CHECK-NEXT: ret 12222entry: 12223 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 12224 ret void 12225} 12226 12227 12228define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 12229; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: 12230; CHECK: # %bb.0: # %entry 12231; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 12232; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 12233; CHECK-NEXT: ret 12234entry: 12235 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 5) 12236 ret void 12237} 12238 12239define void @test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 12240; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16: 12241; CHECK: # %bb.0: # %entry 12242; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 12243; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 12244; CHECK-NEXT: ret 12245entry: 12246 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 12247 ret void 12248} 12249 12250 12251define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 12252; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: 12253; CHECK: # %bb.0: # %entry 12254; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 12255; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 12256; CHECK-NEXT: ret 12257entry: 12258 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 5) 12259 ret void 12260} 12261 12262define void @test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 12263; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32: 12264; CHECK: # %bb.0: # %entry 12265; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 12266; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 12267; CHECK-NEXT: ret 12268entry: 12269 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 12270 ret void 12271} 12272 12273 12274define void @test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 12275; CHECK-LABEL: test_vsoxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: 12276; CHECK: # %bb.0: # %entry 12277; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 
12278; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 12279; CHECK-NEXT: ret 12280entry: 12281 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 5) 12282 ret void 12283} 12284 12285define void @test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 12286; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i64: 12287; CHECK: # %bb.0: # %entry 12288; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 12289; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 12290; CHECK-NEXT: ret 12291entry: 12292 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 5) 12293 ret void 12294} 12295 12296 12297define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 12298; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: 12299; CHECK: # %bb.0: # %entry 12300; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12301; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 12302; CHECK-NEXT: ret 12303entry: 12304 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 5) 12305 ret void 12306} 12307 12308define void @test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 12309; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8: 12310; CHECK: # %bb.0: # %entry 12311; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12312; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 12313; CHECK-NEXT: ret 12314entry: 12315 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 12316 ret void 12317} 12318 12319 12320define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 12321; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: 12322; CHECK: # %bb.0: # %entry 12323; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12324; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 12325; CHECK-NEXT: ret 12326entry: 12327 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 5) 12328 ret void 12329} 12330 12331define void @test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 12332; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16: 12333; CHECK: # %bb.0: # %entry 12334; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12335; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 12336; CHECK-NEXT: ret 12337entry: 12338 tail 
call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 12339 ret void 12340} 12341 12342 12343define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 12344; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: 12345; CHECK: # %bb.0: # %entry 12346; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12347; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 12348; CHECK-NEXT: ret 12349entry: 12350 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 5) 12351 ret void 12352} 12353 12354define void @test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 12355; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32: 12356; CHECK: # %bb.0: # %entry 12357; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12358; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 12359; CHECK-NEXT: ret 12360entry: 12361 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 12362 ret void 12363} 12364 12365 12366define void @test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 12367; CHECK-LABEL: test_vsoxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: 12368; CHECK: # %bb.0: # %entry 12369; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12370; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 12371; CHECK-NEXT: ret 12372entry: 12373 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 5) 12374 ret void 12375} 12376 12377define void @test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 12378; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i64: 12379; CHECK: # %bb.0: # %entry 12380; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 12381; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 12382; CHECK-NEXT: ret 12383entry: 12384 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 5) 12385 ret void 12386} 12387 12388 12389define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 12390; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: 12391; CHECK: # %bb.0: # %entry 12392; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 12393; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 12394; CHECK-NEXT: ret 12395entry: 12396 tail call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 6) 12397 ret void 12398} 12399 12400define void @test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 12401; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8: 12402; CHECK: # %bb.0: # %entry 12403; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 12404; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t 12405; CHECK-NEXT: ret 12406entry: 12407 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 12408 ret void 12409} 12410 12411 12412define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 12413; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: 12414; CHECK: # %bb.0: # %entry 12415; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 12416; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 12417; CHECK-NEXT: ret 12418entry: 12419 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 6) 12420 ret void 12421} 12422 12423define void @test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 12424; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16: 12425; CHECK: # %bb.0: # %entry 12426; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 12427; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t 12428; CHECK-NEXT: ret 12429entry: 12430 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 12431 ret void 12432} 12433 12434 12435define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 12436; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: 12437; CHECK: # %bb.0: # %entry 12438; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 12439; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 12440; CHECK-NEXT: ret 12441entry: 12442 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 6) 12443 ret void 12444} 12445 12446define void @test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 12447; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32: 12448; CHECK: # %bb.0: # %entry 12449; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 12450; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t 12451; CHECK-NEXT: ret 12452entry: 12453 tail call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 12454 ret void 12455} 12456 12457 12458define void @test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 12459; CHECK-LABEL: test_vsoxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: 12460; CHECK: # %bb.0: # %entry 12461; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 12462; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 12463; CHECK-NEXT: ret 12464entry: 12465 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 6) 12466 ret void 12467} 12468 12469define void @test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 12470; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i64: 12471; CHECK: # %bb.0: # %entry 12472; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 12473; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t 12474; CHECK-NEXT: ret 12475entry: 12476 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6) 12477 ret void 12478} 12479 12480 12481define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 12482; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: 12483; CHECK: # %bb.0: # %entry 12484; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 12485; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 12486; CHECK-NEXT: ret 12487entry: 12488 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 6) 12489 ret void 12490} 12491 12492define void @test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 12493; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8: 12494; CHECK: # %bb.0: # %entry 12495; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 12496; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t 12497; CHECK-NEXT: ret 12498entry: 12499 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 12500 ret void 12501} 12502 12503 12504define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 12505; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: 12506; CHECK: # %bb.0: # %entry 12507; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 12508; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 12509; CHECK-NEXT: ret 12510entry: 12511 tail call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 6) 12512 ret void 12513} 12514 12515define void @test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 12516; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16: 12517; CHECK: # %bb.0: # %entry 12518; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 12519; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t 12520; CHECK-NEXT: ret 12521entry: 12522 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 12523 ret void 12524} 12525 12526 12527define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 12528; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: 12529; CHECK: # %bb.0: # %entry 12530; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 12531; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 12532; CHECK-NEXT: ret 12533entry: 12534 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 6) 12535 ret void 12536} 12537 12538define void @test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 12539; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32: 12540; CHECK: # %bb.0: # %entry 12541; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 12542; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t 12543; CHECK-NEXT: ret 12544entry: 12545 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 12546 ret void 12547} 12548 12549 12550define void @test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 12551; CHECK-LABEL: test_vsoxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: 12552; CHECK: # %bb.0: # %entry 12553; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 12554; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 12555; CHECK-NEXT: ret 12556entry: 12557 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 6) 12558 ret void 12559} 12560 12561define void @test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 12562; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i64: 12563; CHECK: # %bb.0: # %entry 12564; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma 12565; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t 12566; CHECK-NEXT: ret 12567entry: 12568 tail call void 
@llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6) 12569 ret void 12570} 12571 12572 12573define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 12574; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: 12575; CHECK: # %bb.0: # %entry 12576; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 12577; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 12578; CHECK-NEXT: ret 12579entry: 12580 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 6) 12581 ret void 12582} 12583 12584define void @test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 12585; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8: 12586; CHECK: # %bb.0: # %entry 12587; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 12588; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t 12589; CHECK-NEXT: ret 12590entry: 12591 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 6) 12592 ret void 12593} 12594 12595 12596define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 12597; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: 12598; CHECK: # %bb.0: # %entry 12599; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 12600; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 12601; CHECK-NEXT: ret 12602entry: 12603 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 6) 12604 ret void 12605} 12606 12607define void @test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 12608; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16: 12609; CHECK: # %bb.0: # %entry 12610; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 12611; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t 12612; CHECK-NEXT: ret 12613entry: 12614 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 6) 12615 ret void 12616} 12617 12618 12619define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 12620; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: 12621; CHECK: # %bb.0: # %entry 12622; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 12623; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 12624; CHECK-NEXT: ret 12625entry: 12626 tail call void 
@llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 6) 12627 ret void 12628} 12629 12630define void @test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 12631; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32: 12632; CHECK: # %bb.0: # %entry 12633; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 12634; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t 12635; CHECK-NEXT: ret 12636entry: 12637 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 6) 12638 ret void 12639} 12640 12641 12642define void @test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 12643; CHECK-LABEL: test_vsoxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: 12644; CHECK: # %bb.0: # %entry 12645; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 12646; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 12647; CHECK-NEXT: ret 12648entry: 12649 tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 6) 12650 ret void 12651} 12652 12653define void @test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 12654; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i64: 12655; CHECK: # %bb.0: # %entry 12656; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma 12657; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t 12658; CHECK-NEXT: ret 12659entry: 12660 tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 6) 12661 ret void 12662} 12663 12664 12665define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 12666; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: 12667; CHECK: # %bb.0: # %entry 12668; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 12669; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 12670; CHECK-NEXT: ret 12671entry: 12672 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 6) 12673 ret void 12674} 12675 12676define void @test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 12677; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8: 12678; CHECK: # %bb.0: # %entry 12679; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma 12680; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t 12681; CHECK-NEXT: ret 12682entry: 12683 tail call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg3ei8.v v8, (a0), v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg3ei8.v v8, (a0), v14, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v14, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v14, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v14, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg4ei8.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg4ei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg4ei16.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg4ei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg4ei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg4ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg4ei64.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg4ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg4ei8.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg4ei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg4ei16.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg4ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg4ei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg4ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg4ei64.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxseg4ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
  ret void
}

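; Note: the vsoxseg5 through vsoxseg8 cases below repeat the pattern of the
; vsoxseg3/vsoxseg4 cases above with a larger tuple. SEW stays e64 and LMUL
; stays m1, so only the tuple size and, with it, the register expected to hold
; the index operand (v13 through v16) change. The trailing i64 operand remains 6,
; which appears to track log2 of the element width selected by vsetvli.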
define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg5ei8.v v8, (a0), v13
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg5ei8.v v8, (a0), v13, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg5ei16.v v8, (a0), v13
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg5ei16.v v8, (a0), v13, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg5ei32.v v8, (a0), v13
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg5ei32.v v8, (a0), v13, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg5ei64.v v8, (a0), v13
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg5ei64.v v8, (a0), v13, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg6ei8.v v8, (a0), v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg6ei8.v v8, (a0), v14, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg6ei16.v v8, (a0), v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg6ei16.v v8, (a0), v14, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg6ei32.v v8, (a0), v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg6ei32.v v8, (a0), v14, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg6ei64.v v8, (a0), v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg6ei64.v v8, (a0), v14, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg7ei8.v v8, (a0), v15
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg7ei8.v v8, (a0), v15, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg7ei16.v v8, (a0), v15
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg7ei16.v v8, (a0), v15, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg7ei32.v v8, (a0), v15
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg7ei32.v v8, (a0), v15, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg7ei64.v v8, (a0), v15
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg7ei64.v v8, (a0), v15, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg8ei8.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg8ei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg8ei16.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg8ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg8ei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg8ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg8ei64.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 6)
  ret void
}

define void @test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxseg8ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}

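; Note: the remaining cases switch the segment element type to bf16 (zvfbfmin).
; The expected vsetvli now selects e16 with an LMUL matching the tuple width
; (mf4 through m4), and the trailing immediate operand becomes i64 4, consistent
; with it encoding log2 of the element width.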
define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxseg3ei8.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxseg3ei8.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4)
  ret void
}

define void @test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 13903 ret void 13904} 13905 13906 13907define void @test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 13908; CHECK-LABEL: test_vsoxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: 13909; CHECK: # %bb.0: # %entry 13910; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 13911; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11 13912; CHECK-NEXT: ret 13913entry: 13914 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 13915 ret void 13916} 13917 13918define void @test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 13919; CHECK-LABEL: test_vsoxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i64: 13920; CHECK: # %bb.0: # %entry 13921; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 13922; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v11, v0.t 13923; CHECK-NEXT: ret 13924entry: 13925 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 13926 ret void 13927} 13928 13929 13930define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 13931; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: 13932; CHECK: # %bb.0: # %entry 13933; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 13934; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 13935; CHECK-NEXT: ret 13936entry: 13937 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 13938 ret void 13939} 13940 13941define void @test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 13942; CHECK-LABEL: test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8: 13943; CHECK: # %bb.0: # %entry 13944; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 13945; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t 13946; CHECK-NEXT: ret 13947entry: 13948 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 13949 ret void 13950} 13951 13952 13953define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 13954; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: 13955; CHECK: # %bb.0: # %entry 13956; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 13957; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 13958; CHECK-NEXT: ret 13959entry: 13960 tail call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 13961 ret void 13962} 13963 13964define void @test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 13965; CHECK-LABEL: test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16: 13966; CHECK: # %bb.0: # %entry 13967; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 13968; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t 13969; CHECK-NEXT: ret 13970entry: 13971 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 13972 ret void 13973} 13974 13975 13976define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 13977; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: 13978; CHECK: # %bb.0: # %entry 13979; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 13980; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11 13981; CHECK-NEXT: ret 13982entry: 13983 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 13984 ret void 13985} 13986 13987define void @test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 13988; CHECK-LABEL: test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32: 13989; CHECK: # %bb.0: # %entry 13990; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 13991; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v11, v0.t 13992; CHECK-NEXT: ret 13993entry: 13994 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 13995 ret void 13996} 13997 13998 13999define void @test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 14000; CHECK-LABEL: test_vsoxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: 14001; CHECK: # %bb.0: # %entry 14002; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14003; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 14004; CHECK-NEXT: ret 14005entry: 14006 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 14007 ret void 14008} 14009 14010define void @test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 14011; CHECK-LABEL: test_vsoxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i64: 14012; CHECK: # %bb.0: # %entry 14013; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14014; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t 14015; CHECK-NEXT: ret 14016entry: 14017 tail call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 14018 ret void 14019} 14020 14021 14022define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 14023; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: 14024; CHECK: # %bb.0: # %entry 14025; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14026; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11 14027; CHECK-NEXT: ret 14028entry: 14029 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 14030 ret void 14031} 14032 14033define void @test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 14034; CHECK-LABEL: test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8: 14035; CHECK: # %bb.0: # %entry 14036; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14037; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v11, v0.t 14038; CHECK-NEXT: ret 14039entry: 14040 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 14041 ret void 14042} 14043 14044 14045define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 14046; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: 14047; CHECK: # %bb.0: # %entry 14048; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14049; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11 14050; CHECK-NEXT: ret 14051entry: 14052 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 14053 ret void 14054} 14055 14056define void @test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 14057; CHECK-LABEL: test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16: 14058; CHECK: # %bb.0: # %entry 14059; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14060; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v11, v0.t 14061; CHECK-NEXT: ret 14062entry: 14063 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 14064 ret void 14065} 14066 14067 14068define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 14069; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: 14070; CHECK: # %bb.0: # %entry 14071; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14072; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 14073; CHECK-NEXT: ret 14074entry: 14075 tail call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 14076 ret void 14077} 14078 14079define void @test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 14080; CHECK-LABEL: test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32: 14081; CHECK: # %bb.0: # %entry 14082; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14083; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t 14084; CHECK-NEXT: ret 14085entry: 14086 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 14087 ret void 14088} 14089 14090 14091define void @test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 14092; CHECK-LABEL: test_vsoxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: 14093; CHECK: # %bb.0: # %entry 14094; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14095; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 14096; CHECK-NEXT: ret 14097entry: 14098 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 14099 ret void 14100} 14101 14102define void @test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 14103; CHECK-LABEL: test_vsoxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i64: 14104; CHECK: # %bb.0: # %entry 14105; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14106; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t 14107; CHECK-NEXT: ret 14108entry: 14109 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 14110 ret void 14111} 14112 14113 14114define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) { 14115; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: 14116; CHECK: # %bb.0: # %entry 14117; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14118; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14 14119; CHECK-NEXT: ret 14120entry: 14121 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 4) 14122 ret void 14123} 14124 14125define void @test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) { 14126; CHECK-LABEL: test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8: 14127; CHECK: # %bb.0: # %entry 14128; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14129; CHECK-NEXT: vsoxseg3ei8.v v8, (a0), v14, v0.t 14130; CHECK-NEXT: ret 14131entry: 14132 tail call void 
@llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 14133 ret void 14134} 14135 14136 14137define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) { 14138; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: 14139; CHECK: # %bb.0: # %entry 14140; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14141; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14 14142; CHECK-NEXT: ret 14143entry: 14144 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 4) 14145 ret void 14146} 14147 14148define void @test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) { 14149; CHECK-LABEL: test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16: 14150; CHECK: # %bb.0: # %entry 14151; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14152; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v14, v0.t 14153; CHECK-NEXT: ret 14154entry: 14155 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 14156 ret void 14157} 14158 14159 14160define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) { 14161; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: 14162; CHECK: # %bb.0: # %entry 14163; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14164; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 14165; CHECK-NEXT: ret 14166entry: 14167 tail call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 4) 14168 ret void 14169} 14170 14171define void @test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 14172; CHECK-LABEL: test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32: 14173; CHECK: # %bb.0: # %entry 14174; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14175; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t 14176; CHECK-NEXT: ret 14177entry: 14178 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 14179 ret void 14180} 14181 14182 14183define void @test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 14184; CHECK-LABEL: test_vsoxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: 14185; CHECK: # %bb.0: # %entry 14186; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14187; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 14188; CHECK-NEXT: ret 14189entry: 14190 tail call void 
@llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 4) 14191 ret void 14192} 14193 14194define void @test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 14195; CHECK-LABEL: test_vsoxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i64: 14196; CHECK: # %bb.0: # %entry 14197; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14198; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t 14199; CHECK-NEXT: ret 14200entry: 14201 tail call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 14202 ret void 14203} 14204 14205 14206define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 14207; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: 14208; CHECK: # %bb.0: # %entry 14209; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14210; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 14211; CHECK-NEXT: ret 14212entry: 14213 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 14214 ret void 14215} 14216 14217define void @test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 14218; CHECK-LABEL: test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8: 14219; CHECK: # %bb.0: # %entry 14220; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14221; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 14222; CHECK-NEXT: ret 14223entry: 14224 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 14225 ret void 14226} 14227 14228 14229define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 14230; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: 14231; CHECK: # %bb.0: # %entry 14232; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14233; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 14234; CHECK-NEXT: ret 14235entry: 14236 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 14237 ret void 14238} 14239 14240define void @test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 14241; CHECK-LABEL: test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16: 14242; CHECK: # %bb.0: # %entry 14243; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14244; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 14245; CHECK-NEXT: ret 14246entry: 14247 tail call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 14248 ret void 14249} 14250 14251 14252define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 14253; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: 14254; CHECK: # %bb.0: # %entry 14255; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14256; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 14257; CHECK-NEXT: ret 14258entry: 14259 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 14260 ret void 14261} 14262 14263define void @test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 14264; CHECK-LABEL: test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32: 14265; CHECK: # %bb.0: # %entry 14266; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14267; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 14268; CHECK-NEXT: ret 14269entry: 14270 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 14271 ret void 14272} 14273 14274 14275define void @test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 14276; CHECK-LABEL: test_vsoxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: 14277; CHECK: # %bb.0: # %entry 14278; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14279; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 14280; CHECK-NEXT: ret 14281entry: 14282 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 14283 ret void 14284} 14285 14286define void @test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 14287; CHECK-LABEL: test_vsoxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i64: 14288; CHECK: # %bb.0: # %entry 14289; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14290; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 14291; CHECK-NEXT: ret 14292entry: 14293 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 14294 ret void 14295} 14296 14297 14298define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 14299; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: 14300; CHECK: # %bb.0: # %entry 14301; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14302; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 14303; CHECK-NEXT: ret 14304entry: 14305 tail call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 14306 ret void 14307} 14308 14309define void @test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 14310; CHECK-LABEL: test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8: 14311; CHECK: # %bb.0: # %entry 14312; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14313; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 14314; CHECK-NEXT: ret 14315entry: 14316 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 14317 ret void 14318} 14319 14320 14321define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 14322; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: 14323; CHECK: # %bb.0: # %entry 14324; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14325; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 14326; CHECK-NEXT: ret 14327entry: 14328 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 14329 ret void 14330} 14331 14332define void @test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 14333; CHECK-LABEL: test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16: 14334; CHECK: # %bb.0: # %entry 14335; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14336; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 14337; CHECK-NEXT: ret 14338entry: 14339 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 14340 ret void 14341} 14342 14343 14344define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 14345; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: 14346; CHECK: # %bb.0: # %entry 14347; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14348; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 14349; CHECK-NEXT: ret 14350entry: 14351 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 14352 ret void 14353} 14354 14355define void @test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 14356; CHECK-LABEL: test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32: 14357; CHECK: # %bb.0: # %entry 14358; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14359; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 14360; CHECK-NEXT: ret 14361entry: 14362 tail call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 14363 ret void 14364} 14365 14366 14367define void @test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 14368; CHECK-LABEL: test_vsoxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: 14369; CHECK: # %bb.0: # %entry 14370; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14371; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 14372; CHECK-NEXT: ret 14373entry: 14374 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 14375 ret void 14376} 14377 14378define void @test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 14379; CHECK-LABEL: test_vsoxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i64: 14380; CHECK: # %bb.0: # %entry 14381; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14382; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 14383; CHECK-NEXT: ret 14384entry: 14385 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 14386 ret void 14387} 14388 14389 14390define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 14391; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: 14392; CHECK: # %bb.0: # %entry 14393; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14394; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12 14395; CHECK-NEXT: ret 14396entry: 14397 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 14398 ret void 14399} 14400 14401define void @test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 14402; CHECK-LABEL: test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8: 14403; CHECK: # %bb.0: # %entry 14404; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14405; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v12, v0.t 14406; CHECK-NEXT: ret 14407entry: 14408 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 14409 ret void 14410} 14411 14412 14413define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 14414; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: 14415; CHECK: # %bb.0: # %entry 14416; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14417; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12 14418; CHECK-NEXT: ret 14419entry: 14420 tail call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 14421 ret void 14422} 14423 14424define void @test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 14425; CHECK-LABEL: test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16: 14426; CHECK: # %bb.0: # %entry 14427; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14428; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v12, v0.t 14429; CHECK-NEXT: ret 14430entry: 14431 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 14432 ret void 14433} 14434 14435 14436define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 14437; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: 14438; CHECK: # %bb.0: # %entry 14439; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14440; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 14441; CHECK-NEXT: ret 14442entry: 14443 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 14444 ret void 14445} 14446 14447define void @test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 14448; CHECK-LABEL: test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32: 14449; CHECK: # %bb.0: # %entry 14450; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14451; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t 14452; CHECK-NEXT: ret 14453entry: 14454 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 14455 ret void 14456} 14457 14458 14459define void @test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 14460; CHECK-LABEL: test_vsoxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: 14461; CHECK: # %bb.0: # %entry 14462; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14463; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 14464; CHECK-NEXT: ret 14465entry: 14466 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 14467 ret void 14468} 14469 14470define void @test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 14471; CHECK-LABEL: test_vsoxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i64: 14472; CHECK: # %bb.0: # %entry 14473; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14474; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t 14475; CHECK-NEXT: ret 14476entry: 14477 tail call void 
@llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 14478 ret void 14479} 14480 14481 14482define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) { 14483; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: 14484; CHECK: # %bb.0: # %entry 14485; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14486; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16 14487; CHECK-NEXT: ret 14488entry: 14489 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, i64 4) 14490 ret void 14491} 14492 14493define void @test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) { 14494; CHECK-LABEL: test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8: 14495; CHECK: # %bb.0: # %entry 14496; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14497; CHECK-NEXT: vsoxseg4ei8.v v8, (a0), v16, v0.t 14498; CHECK-NEXT: ret 14499entry: 14500 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 14501 ret void 14502} 14503 14504 14505define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) { 14506; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: 14507; CHECK: # %bb.0: # %entry 14508; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14509; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16 14510; CHECK-NEXT: ret 14511entry: 14512 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, i64 4) 14513 ret void 14514} 14515 14516define void @test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) { 14517; CHECK-LABEL: test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16: 14518; CHECK: # %bb.0: # %entry 14519; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14520; CHECK-NEXT: vsoxseg4ei16.v v8, (a0), v16, v0.t 14521; CHECK-NEXT: ret 14522entry: 14523 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 14524 ret void 14525} 14526 14527 14528define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) { 14529; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: 14530; CHECK: # %bb.0: # %entry 14531; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14532; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 14533; CHECK-NEXT: ret 14534entry: 14535 tail call void 
@llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, i64 4) 14536 ret void 14537} 14538 14539define void @test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) { 14540; CHECK-LABEL: test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32: 14541; CHECK: # %bb.0: # %entry 14542; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14543; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t 14544; CHECK-NEXT: ret 14545entry: 14546 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 14547 ret void 14548} 14549 14550 14551define void @test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) { 14552; CHECK-LABEL: test_vsoxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: 14553; CHECK: # %bb.0: # %entry 14554; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14555; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 14556; CHECK-NEXT: ret 14557entry: 14558 tail call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, i64 4) 14559 ret void 14560} 14561 14562define void @test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) { 14563; CHECK-LABEL: test_vsoxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i64: 14564; CHECK: # %bb.0: # %entry 14565; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 14566; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t 14567; CHECK-NEXT: ret 14568entry: 14569 tail call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 4) 14570 ret void 14571} 14572 14573 14574define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 14575; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: 14576; CHECK: # %bb.0: # %entry 14577; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14578; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 14579; CHECK-NEXT: ret 14580entry: 14581 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 14582 ret void 14583} 14584 14585define void @test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 14586; CHECK-LABEL: test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8: 14587; CHECK: # %bb.0: # %entry 14588; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14589; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 14590; CHECK-NEXT: ret 14591entry: 14592 tail call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 14593 ret void 14594} 14595 14596 14597define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 14598; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: 14599; CHECK: # %bb.0: # %entry 14600; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14601; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 14602; CHECK-NEXT: ret 14603entry: 14604 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 14605 ret void 14606} 14607 14608define void @test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 14609; CHECK-LABEL: test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16: 14610; CHECK: # %bb.0: # %entry 14611; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14612; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 14613; CHECK-NEXT: ret 14614entry: 14615 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 14616 ret void 14617} 14618 14619 14620define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 14621; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: 14622; CHECK: # %bb.0: # %entry 14623; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14624; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 14625; CHECK-NEXT: ret 14626entry: 14627 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 14628 ret void 14629} 14630 14631define void @test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 14632; CHECK-LABEL: test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32: 14633; CHECK: # %bb.0: # %entry 14634; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14635; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t 14636; CHECK-NEXT: ret 14637entry: 14638 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 14639 ret void 14640} 14641 14642 14643define void @test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 14644; CHECK-LABEL: test_vsoxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: 14645; CHECK: # %bb.0: # %entry 14646; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14647; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13 14648; CHECK-NEXT: ret 14649entry: 14650 tail call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 14651 ret void 14652} 14653 14654define void @test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 14655; CHECK-LABEL: test_vsoxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i64: 14656; CHECK: # %bb.0: # %entry 14657; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14658; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v13, v0.t 14659; CHECK-NEXT: ret 14660entry: 14661 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 14662 ret void 14663} 14664 14665 14666define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 14667; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: 14668; CHECK: # %bb.0: # %entry 14669; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14670; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 14671; CHECK-NEXT: ret 14672entry: 14673 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 14674 ret void 14675} 14676 14677define void @test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 14678; CHECK-LABEL: test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8: 14679; CHECK: # %bb.0: # %entry 14680; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14681; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 14682; CHECK-NEXT: ret 14683entry: 14684 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 14685 ret void 14686} 14687 14688 14689define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 14690; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: 14691; CHECK: # %bb.0: # %entry 14692; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14693; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 14694; CHECK-NEXT: ret 14695entry: 14696 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 14697 ret void 14698} 14699 14700define void @test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 14701; CHECK-LABEL: test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16: 14702; CHECK: # %bb.0: # %entry 14703; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14704; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 14705; CHECK-NEXT: ret 14706entry: 14707 tail call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 14708 ret void 14709} 14710 14711 14712define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 14713; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: 14714; CHECK: # %bb.0: # %entry 14715; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14716; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13 14717; CHECK-NEXT: ret 14718entry: 14719 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 14720 ret void 14721} 14722 14723define void @test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 14724; CHECK-LABEL: test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32: 14725; CHECK: # %bb.0: # %entry 14726; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14727; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v13, v0.t 14728; CHECK-NEXT: ret 14729entry: 14730 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 14731 ret void 14732} 14733 14734 14735define void @test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 14736; CHECK-LABEL: test_vsoxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: 14737; CHECK: # %bb.0: # %entry 14738; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14739; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14 14740; CHECK-NEXT: ret 14741entry: 14742 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 14743 ret void 14744} 14745 14746define void @test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 14747; CHECK-LABEL: test_vsoxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i64: 14748; CHECK: # %bb.0: # %entry 14749; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14750; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v14, v0.t 14751; CHECK-NEXT: ret 14752entry: 14753 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 14754 ret void 14755} 14756 14757 14758define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 14759; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: 14760; CHECK: # %bb.0: # %entry 14761; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14762; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13 14763; CHECK-NEXT: ret 14764entry: 14765 tail call void 
@llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 14766 ret void 14767} 14768 14769define void @test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 14770; CHECK-LABEL: test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8: 14771; CHECK: # %bb.0: # %entry 14772; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14773; CHECK-NEXT: vsoxseg5ei8.v v8, (a0), v13, v0.t 14774; CHECK-NEXT: ret 14775entry: 14776 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 14777 ret void 14778} 14779 14780 14781define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 14782; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: 14783; CHECK: # %bb.0: # %entry 14784; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14785; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13 14786; CHECK-NEXT: ret 14787entry: 14788 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 14789 ret void 14790} 14791 14792define void @test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 14793; CHECK-LABEL: test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16: 14794; CHECK: # %bb.0: # %entry 14795; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14796; CHECK-NEXT: vsoxseg5ei16.v v8, (a0), v13, v0.t 14797; CHECK-NEXT: ret 14798entry: 14799 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 14800 ret void 14801} 14802 14803 14804define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 14805; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: 14806; CHECK: # %bb.0: # %entry 14807; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14808; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14 14809; CHECK-NEXT: ret 14810entry: 14811 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 14812 ret void 14813} 14814 14815define void @test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 14816; CHECK-LABEL: test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32: 14817; CHECK: # %bb.0: # %entry 14818; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14819; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v14, v0.t 14820; CHECK-NEXT: ret 14821entry: 14822 tail call void 
@llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 14823 ret void 14824} 14825 14826 14827define void @test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 14828; CHECK-LABEL: test_vsoxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: 14829; CHECK: # %bb.0: # %entry 14830; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14831; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 14832; CHECK-NEXT: ret 14833entry: 14834 tail call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 14835 ret void 14836} 14837 14838define void @test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 14839; CHECK-LABEL: test_vsoxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i64: 14840; CHECK: # %bb.0: # %entry 14841; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 14842; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t 14843; CHECK-NEXT: ret 14844entry: 14845 tail call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 14846 ret void 14847} 14848 14849 14850define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 14851; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: 14852; CHECK: # %bb.0: # %entry 14853; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14854; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 14855; CHECK-NEXT: ret 14856entry: 14857 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 14858 ret void 14859} 14860 14861define void @test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 14862; CHECK-LABEL: test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8: 14863; CHECK: # %bb.0: # %entry 14864; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14865; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 14866; CHECK-NEXT: ret 14867entry: 14868 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 14869 ret void 14870} 14871 14872 14873define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 14874; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: 14875; CHECK: # %bb.0: # %entry 14876; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14877; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 14878; CHECK-NEXT: ret 14879entry: 14880 tail call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 14881 ret void 14882} 14883 14884define void @test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 14885; CHECK-LABEL: test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16: 14886; CHECK: # %bb.0: # %entry 14887; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14888; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 14889; CHECK-NEXT: ret 14890entry: 14891 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 14892 ret void 14893} 14894 14895 14896define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 14897; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: 14898; CHECK: # %bb.0: # %entry 14899; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14900; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 14901; CHECK-NEXT: ret 14902entry: 14903 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 14904 ret void 14905} 14906 14907define void @test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 14908; CHECK-LABEL: test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32: 14909; CHECK: # %bb.0: # %entry 14910; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14911; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 14912; CHECK-NEXT: ret 14913entry: 14914 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 14915 ret void 14916} 14917 14918 14919define void @test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 14920; CHECK-LABEL: test_vsoxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: 14921; CHECK: # %bb.0: # %entry 14922; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14923; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 14924; CHECK-NEXT: ret 14925entry: 14926 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 14927 ret void 14928} 14929 14930define void @test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 14931; CHECK-LABEL: test_vsoxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i64: 14932; CHECK: # %bb.0: # %entry 14933; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 14934; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t 14935; CHECK-NEXT: ret 14936entry: 14937 tail call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 14938 ret void 14939} 14940 14941 14942define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 14943; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: 14944; CHECK: # %bb.0: # %entry 14945; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14946; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 14947; CHECK-NEXT: ret 14948entry: 14949 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 14950 ret void 14951} 14952 14953define void @test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 14954; CHECK-LABEL: test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8: 14955; CHECK: # %bb.0: # %entry 14956; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14957; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 14958; CHECK-NEXT: ret 14959entry: 14960 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 14961 ret void 14962} 14963 14964 14965define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 14966; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: 14967; CHECK: # %bb.0: # %entry 14968; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14969; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 14970; CHECK-NEXT: ret 14971entry: 14972 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 14973 ret void 14974} 14975 14976define void @test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 14977; CHECK-LABEL: test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16: 14978; CHECK: # %bb.0: # %entry 14979; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14980; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 14981; CHECK-NEXT: ret 14982entry: 14983 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 14984 ret void 14985} 14986 14987 14988define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 14989; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: 14990; CHECK: # %bb.0: # %entry 14991; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 14992; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 14993; CHECK-NEXT: ret 14994entry: 14995 tail call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 14996 ret void 14997} 14998 14999define void @test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 15000; CHECK-LABEL: test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32: 15001; CHECK: # %bb.0: # %entry 15002; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15003; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 15004; CHECK-NEXT: ret 15005entry: 15006 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 15007 ret void 15008} 15009 15010 15011define void @test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 15012; CHECK-LABEL: test_vsoxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: 15013; CHECK: # %bb.0: # %entry 15014; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15015; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14 15016; CHECK-NEXT: ret 15017entry: 15018 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 15019 ret void 15020} 15021 15022define void @test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 15023; CHECK-LABEL: test_vsoxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i64: 15024; CHECK: # %bb.0: # %entry 15025; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15026; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v14, v0.t 15027; CHECK-NEXT: ret 15028entry: 15029 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 15030 ret void 15031} 15032 15033 15034define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 15035; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: 15036; CHECK: # %bb.0: # %entry 15037; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15038; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14 15039; CHECK-NEXT: ret 15040entry: 15041 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 15042 ret void 15043} 15044 15045define void @test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 15046; CHECK-LABEL: test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8: 15047; CHECK: # %bb.0: # %entry 15048; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15049; CHECK-NEXT: vsoxseg6ei8.v v8, (a0), v14, v0.t 15050; CHECK-NEXT: ret 15051entry: 15052 tail call void 
@llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 15053 ret void 15054} 15055 15056 15057define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 15058; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: 15059; CHECK: # %bb.0: # %entry 15060; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15061; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14 15062; CHECK-NEXT: ret 15063entry: 15064 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 15065 ret void 15066} 15067 15068define void @test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 15069; CHECK-LABEL: test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16: 15070; CHECK: # %bb.0: # %entry 15071; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15072; CHECK-NEXT: vsoxseg6ei16.v v8, (a0), v14, v0.t 15073; CHECK-NEXT: ret 15074entry: 15075 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 15076 ret void 15077} 15078 15079 15080define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 15081; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: 15082; CHECK: # %bb.0: # %entry 15083; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15084; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14 15085; CHECK-NEXT: ret 15086entry: 15087 tail call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 15088 ret void 15089} 15090 15091define void @test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 15092; CHECK-LABEL: test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32: 15093; CHECK: # %bb.0: # %entry 15094; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15095; CHECK-NEXT: vsoxseg6ei32.v v8, (a0), v14, v0.t 15096; CHECK-NEXT: ret 15097entry: 15098 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 15099 ret void 15100} 15101 15102 15103define void @test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 15104; CHECK-LABEL: test_vsoxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: 15105; CHECK: # %bb.0: # %entry 15106; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15107; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16 15108; CHECK-NEXT: ret 15109entry: 15110 tail call void 
@llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 15111 ret void 15112} 15113 15114define void @test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 15115; CHECK-LABEL: test_vsoxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i64: 15116; CHECK: # %bb.0: # %entry 15117; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15118; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16, v0.t 15119; CHECK-NEXT: ret 15120entry: 15121 tail call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 15122 ret void 15123} 15124 15125 15126define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 15127; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: 15128; CHECK: # %bb.0: # %entry 15129; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15130; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 15131; CHECK-NEXT: ret 15132entry: 15133 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 15134 ret void 15135} 15136 15137define void @test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 15138; CHECK-LABEL: test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8: 15139; CHECK: # %bb.0: # %entry 15140; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15141; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 15142; CHECK-NEXT: ret 15143entry: 15144 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 15145 ret void 15146} 15147 15148 15149define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 15150; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: 15151; CHECK: # %bb.0: # %entry 15152; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15153; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 15154; CHECK-NEXT: ret 15155entry: 15156 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 15157 ret void 15158} 15159 15160define void @test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 15161; CHECK-LABEL: test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16: 15162; CHECK: # %bb.0: # %entry 15163; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15164; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t 15165; CHECK-NEXT: ret 15166entry: 15167 tail call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 15168 ret void 15169} 15170 15171 15172define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 15173; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: 15174; CHECK: # %bb.0: # %entry 15175; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15176; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 15177; CHECK-NEXT: ret 15178entry: 15179 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 15180 ret void 15181} 15182 15183define void @test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 15184; CHECK-LABEL: test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32: 15185; CHECK: # %bb.0: # %entry 15186; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15187; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t 15188; CHECK-NEXT: ret 15189entry: 15190 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 15191 ret void 15192} 15193 15194 15195define void @test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 15196; CHECK-LABEL: test_vsoxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: 15197; CHECK: # %bb.0: # %entry 15198; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15199; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15 15200; CHECK-NEXT: ret 15201entry: 15202 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 15203 ret void 15204} 15205 15206define void @test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 15207; CHECK-LABEL: test_vsoxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i64: 15208; CHECK: # %bb.0: # %entry 15209; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15210; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v15, v0.t 15211; CHECK-NEXT: ret 15212entry: 15213 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 15214 ret void 15215} 15216 15217 15218define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 15219; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: 15220; CHECK: # %bb.0: # %entry 15221; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15222; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 15223; CHECK-NEXT: ret 15224entry: 15225 tail call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 15226 ret void 15227} 15228 15229define void @test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 15230; CHECK-LABEL: test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8: 15231; CHECK: # %bb.0: # %entry 15232; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15233; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 15234; CHECK-NEXT: ret 15235entry: 15236 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 15237 ret void 15238} 15239 15240 15241define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 15242; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: 15243; CHECK: # %bb.0: # %entry 15244; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15245; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 15246; CHECK-NEXT: ret 15247entry: 15248 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 15249 ret void 15250} 15251 15252define void @test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 15253; CHECK-LABEL: test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16: 15254; CHECK: # %bb.0: # %entry 15255; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15256; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t 15257; CHECK-NEXT: ret 15258entry: 15259 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 15260 ret void 15261} 15262 15263 15264define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 15265; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: 15266; CHECK: # %bb.0: # %entry 15267; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15268; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15 15269; CHECK-NEXT: ret 15270entry: 15271 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 15272 ret void 15273} 15274 15275define void @test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 15276; CHECK-LABEL: test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32: 15277; CHECK: # %bb.0: # %entry 15278; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15279; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v15, v0.t 15280; CHECK-NEXT: ret 15281entry: 15282 tail call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 15283 ret void 15284} 15285 15286 15287define void @test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 15288; CHECK-LABEL: test_vsoxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: 15289; CHECK: # %bb.0: # %entry 15290; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15291; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 15292; CHECK-NEXT: ret 15293entry: 15294 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 15295 ret void 15296} 15297 15298define void @test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 15299; CHECK-LABEL: test_vsoxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i64: 15300; CHECK: # %bb.0: # %entry 15301; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15302; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t 15303; CHECK-NEXT: ret 15304entry: 15305 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 15306 ret void 15307} 15308 15309 15310define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 15311; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: 15312; CHECK: # %bb.0: # %entry 15313; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15314; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15 15315; CHECK-NEXT: ret 15316entry: 15317 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 15318 ret void 15319} 15320 15321define void @test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 15322; CHECK-LABEL: test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8: 15323; CHECK: # %bb.0: # %entry 15324; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15325; CHECK-NEXT: vsoxseg7ei8.v v8, (a0), v15, v0.t 15326; CHECK-NEXT: ret 15327entry: 15328 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 15329 ret void 15330} 15331 15332 15333define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 15334; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: 15335; CHECK: # %bb.0: # %entry 15336; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15337; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15 15338; CHECK-NEXT: ret 15339entry: 15340 tail call void 
@llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 15341 ret void 15342} 15343 15344define void @test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 15345; CHECK-LABEL: test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16: 15346; CHECK: # %bb.0: # %entry 15347; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15348; CHECK-NEXT: vsoxseg7ei16.v v8, (a0), v15, v0.t 15349; CHECK-NEXT: ret 15350entry: 15351 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 15352 ret void 15353} 15354 15355 15356define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 15357; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: 15358; CHECK: # %bb.0: # %entry 15359; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15360; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16 15361; CHECK-NEXT: ret 15362entry: 15363 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 15364 ret void 15365} 15366 15367define void @test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 15368; CHECK-LABEL: test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32: 15369; CHECK: # %bb.0: # %entry 15370; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15371; CHECK-NEXT: vsoxseg7ei32.v v8, (a0), v16, v0.t 15372; CHECK-NEXT: ret 15373entry: 15374 tail call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 15375 ret void 15376} 15377 15378 15379define void @test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 15380; CHECK-LABEL: test_vsoxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: 15381; CHECK: # %bb.0: # %entry 15382; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15383; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 15384; CHECK-NEXT: ret 15385entry: 15386 tail call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 15387 ret void 15388} 15389 15390define void @test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 15391; CHECK-LABEL: test_vsoxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i64: 15392; CHECK: # %bb.0: # %entry 15393; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15394; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t 15395; CHECK-NEXT: ret 15396entry: 15397 tail call void 
@llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 15398 ret void 15399} 15400 15401 15402define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) { 15403; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: 15404; CHECK: # %bb.0: # %entry 15405; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15406; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 15407; CHECK-NEXT: ret 15408entry: 15409 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, i64 4) 15410 ret void 15411} 15412 15413define void @test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) { 15414; CHECK-LABEL: test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8: 15415; CHECK: # %bb.0: # %entry 15416; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15417; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 15418; CHECK-NEXT: ret 15419entry: 15420 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 15421 ret void 15422} 15423 15424 15425define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) { 15426; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: 15427; CHECK: # %bb.0: # %entry 15428; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15429; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 15430; CHECK-NEXT: ret 15431entry: 15432 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, i64 4) 15433 ret void 15434} 15435 15436define void @test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) { 15437; CHECK-LABEL: test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16: 15438; CHECK: # %bb.0: # %entry 15439; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15440; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 15441; CHECK-NEXT: ret 15442entry: 15443 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 15444 ret void 15445} 15446 15447 15448define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) { 15449; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: 15450; CHECK: # %bb.0: # %entry 15451; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15452; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 15453; CHECK-NEXT: ret 15454entry: 15455 tail call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, i64 4) 15456 ret void 15457} 15458 15459define void @test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) { 15460; CHECK-LABEL: test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32: 15461; CHECK: # %bb.0: # %entry 15462; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15463; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 15464; CHECK-NEXT: ret 15465entry: 15466 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 15467 ret void 15468} 15469 15470 15471define void @test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) { 15472; CHECK-LABEL: test_vsoxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: 15473; CHECK: # %bb.0: # %entry 15474; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15475; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 15476; CHECK-NEXT: ret 15477entry: 15478 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, i64 4) 15479 ret void 15480} 15481 15482define void @test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) { 15483; CHECK-LABEL: test_vsoxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i64: 15484; CHECK: # %bb.0: # %entry 15485; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 15486; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 15487; CHECK-NEXT: ret 15488entry: 15489 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 4) 15490 ret void 15491} 15492 15493 15494define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) { 15495; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: 15496; CHECK: # %bb.0: # %entry 15497; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15498; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 15499; CHECK-NEXT: ret 15500entry: 15501 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, i64 4) 15502 ret void 15503} 15504 15505define void @test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) { 15506; CHECK-LABEL: test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8: 15507; CHECK: # %bb.0: # %entry 15508; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15509; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 15510; CHECK-NEXT: ret 15511entry: 15512 tail call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 15513 ret void 15514} 15515 15516 15517define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) { 15518; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: 15519; CHECK: # %bb.0: # %entry 15520; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15521; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 15522; CHECK-NEXT: ret 15523entry: 15524 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, i64 4) 15525 ret void 15526} 15527 15528define void @test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) { 15529; CHECK-LABEL: test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16: 15530; CHECK: # %bb.0: # %entry 15531; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15532; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 15533; CHECK-NEXT: ret 15534entry: 15535 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 15536 ret void 15537} 15538 15539 15540define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) { 15541; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: 15542; CHECK: # %bb.0: # %entry 15543; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15544; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 15545; CHECK-NEXT: ret 15546entry: 15547 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, i64 4) 15548 ret void 15549} 15550 15551define void @test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) { 15552; CHECK-LABEL: test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32: 15553; CHECK: # %bb.0: # %entry 15554; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15555; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 15556; CHECK-NEXT: ret 15557entry: 15558 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 15559 ret void 15560} 15561 15562 15563define void @test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) { 15564; CHECK-LABEL: test_vsoxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: 15565; CHECK: # %bb.0: # %entry 15566; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15567; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 15568; CHECK-NEXT: ret 15569entry: 15570 tail call void 
@llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, i64 4) 15571 ret void 15572} 15573 15574define void @test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) { 15575; CHECK-LABEL: test_vsoxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i64: 15576; CHECK: # %bb.0: # %entry 15577; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 15578; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 15579; CHECK-NEXT: ret 15580entry: 15581 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 4) 15582 ret void 15583} 15584 15585 15586define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) { 15587; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: 15588; CHECK: # %bb.0: # %entry 15589; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15590; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16 15591; CHECK-NEXT: ret 15592entry: 15593 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, i64 4) 15594 ret void 15595} 15596 15597define void @test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) { 15598; CHECK-LABEL: test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8: 15599; CHECK: # %bb.0: # %entry 15600; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15601; CHECK-NEXT: vsoxseg8ei8.v v8, (a0), v16, v0.t 15602; CHECK-NEXT: ret 15603entry: 15604 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 15605 ret void 15606} 15607 15608 15609define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) { 15610; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: 15611; CHECK: # %bb.0: # %entry 15612; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15613; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16 15614; CHECK-NEXT: ret 15615entry: 15616 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, i64 4) 15617 ret void 15618} 15619 15620define void @test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) { 15621; CHECK-LABEL: test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16: 15622; CHECK: # %bb.0: # %entry 15623; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15624; CHECK-NEXT: vsoxseg8ei16.v v8, (a0), v16, v0.t 15625; CHECK-NEXT: ret 15626entry: 15627 tail call void 
@llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 15628 ret void 15629} 15630 15631 15632define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) { 15633; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: 15634; CHECK: # %bb.0: # %entry 15635; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15636; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16 15637; CHECK-NEXT: ret 15638entry: 15639 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, i64 4) 15640 ret void 15641} 15642 15643define void @test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) { 15644; CHECK-LABEL: test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32: 15645; CHECK: # %bb.0: # %entry 15646; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15647; CHECK-NEXT: vsoxseg8ei32.v v8, (a0), v16, v0.t 15648; CHECK-NEXT: ret 15649entry: 15650 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 15651 ret void 15652} 15653 15654 15655define void @test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) { 15656; CHECK-LABEL: test_vsoxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: 15657; CHECK: # %bb.0: # %entry 15658; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15659; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 15660; CHECK-NEXT: ret 15661entry: 15662 tail call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, i64 4) 15663 ret void 15664} 15665 15666define void @test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) { 15667; CHECK-LABEL: test_vsoxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i64: 15668; CHECK: # %bb.0: # %entry 15669; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 15670; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t 15671; CHECK-NEXT: ret 15672entry: 15673 tail call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 4) 15674 ret void 15675} 15676 15677