; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfh \
; RUN:   -verify-machineinstrs < %s | FileCheck %s

declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, i32, <vscale x 1 x i1>, i32, i32)

define void @test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, i32, <vscale x 2 x i1>, i32, i32)

define void @test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, i32, <vscale x 4 x i1>, i32, i32)

define void @test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i32, <vscale x 8 x i1>, i32, i32)

define void @test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, i32, <vscale x 16 x i1>, i32, i32)

define void @test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, i32, <vscale x 32 x i1>, i32, i32)

define void @test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 32 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, i32, <vscale x 1 x i1>, i32, i32)

define void @test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vssseg3e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vssseg3e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, i32, <vscale x 2 x i1>, i32, i32)

define void @test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vssseg3e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vssseg3e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, i32, <vscale x 4 x i1>, i32, i32)

define void @test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT:    vssseg3e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT:    vssseg3e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, i32, <vscale x 8 x i1>, i32, i32)

define void @test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT:    vssseg3e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vssseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT:    vssseg3e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, i32, <vscale x 16 x i1>, i32, i32)

define void @test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vssseg3e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vssseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vssseg3e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, i32, <vscale x 1 x i1>, i32, i32)

define void @test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vssseg4e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vssseg4e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, i32, <vscale x 2 x i1>, i32, i32)

define void @test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vssseg4e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vssseg4e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, i32, <vscale x 4 x i1>, i32, i32)

define void @test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT:    vssseg4e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT:    vssseg4e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, i32, <vscale x 8 x i1>, i32, i32)

define void @test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT:    vssseg4e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vssseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT:    vssseg4e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, i32, <vscale x 16 x i1>, i32, i32)

define void @test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vssseg4e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vssseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vssseg4e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, i32, <vscale x 1 x i1>, i32, i32)

define void @test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vssseg5e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vssseg5e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, i32, <vscale x 2 x i1>, i32, i32)

define void @test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vssseg5e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vssseg5e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, i32, <vscale x 4 x i1>, i32, i32)

define void @test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT:    vssseg5e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT:    vssseg5e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, i32, <vscale x 8 x i1>, i32, i32)

define void @test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT:    vssseg5e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vssseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT:    vssseg5e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, i32, <vscale x 1 x i1>, i32, i32)

define void @test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vssseg6e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vssseg6e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
  ret void
}

declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, i32, i32, i32)
declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, i32, <vscale x 2 x i1>, i32, i32)

define void @test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vssseg6e8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 3)
  ret void
}

define void @test_vssseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vssseg6e8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
  ret void
}

555declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, i32, i32, i32) 556declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, i32, <vscale x 4 x i1>, i32, i32) 557 558define void @test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) { 559; CHECK-LABEL: test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t: 560; CHECK: # %bb.0: # %entry 561; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma 562; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 563; CHECK-NEXT: ret 564entry: 565 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 3) 566 ret void 567} 568 569define void @test_vssseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 570; CHECK-LABEL: test_vssseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t: 571; CHECK: # %bb.0: # %entry 572; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma 573; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t 574; CHECK-NEXT: ret 575entry: 576 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 3) 577 ret void 578} 579 580declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, i32, i32, i32) 581declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, i32, <vscale x 8 x i1>, i32, i32) 582 583define void @test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) { 
584; CHECK-LABEL: test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t: 585; CHECK: # %bb.0: # %entry 586; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma 587; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 588; CHECK-NEXT: ret 589entry: 590 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 3) 591 ret void 592} 593 594define void @test_vssseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) { 595; CHECK-LABEL: test_vssseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t: 596; CHECK: # %bb.0: # %entry 597; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma 598; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t 599; CHECK-NEXT: ret 600entry: 601 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 3) 602 ret void 603} 604 605declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, i32, i32, i32) 606declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, i32, <vscale x 1 x i1>, i32, i32) 607 608define void @test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 609; CHECK-LABEL: test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t: 610; CHECK: # %bb.0: # %entry 611; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma 612; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 613; CHECK-NEXT: ret 614entry: 615 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 3) 616 ret void 617} 618 619define void 
@test_vssseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 620; CHECK-LABEL: test_vssseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: 621; CHECK: # %bb.0: # %entry 622; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma 623; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t 624; CHECK-NEXT: ret 625entry: 626 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 3) 627 ret void 628} 629 630declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, i32, i32, i32) 631declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, i32, <vscale x 2 x i1>, i32, i32) 632 633define void @test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 634; CHECK-LABEL: test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t: 635; CHECK: # %bb.0: # %entry 636; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma 637; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 638; CHECK-NEXT: ret 639entry: 640 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 3) 641 ret void 642} 643 644define void @test_vssseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 645; CHECK-LABEL: test_vssseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t: 646; CHECK: # %bb.0: # %entry 647; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma 648; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t 649; CHECK-NEXT: ret 650entry: 651 tail call void 
@llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 3) 652 ret void 653} 654 655declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, i32, i32, i32) 656declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, i32, <vscale x 4 x i1>, i32, i32) 657 658define void @test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 659; CHECK-LABEL: test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t: 660; CHECK: # %bb.0: # %entry 661; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma 662; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 663; CHECK-NEXT: ret 664entry: 665 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 3) 666 ret void 667} 668 669define void @test_vssseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 670; CHECK-LABEL: test_vssseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t: 671; CHECK: # %bb.0: # %entry 672; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma 673; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t 674; CHECK-NEXT: ret 675entry: 676 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 3) 677 ret void 678} 679 680declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, i32, i32, i32) 681declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 
7), ptr, i32, <vscale x 8 x i1>, i32, i32) 682 683define void @test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 684; CHECK-LABEL: test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t: 685; CHECK: # %bb.0: # %entry 686; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma 687; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 688; CHECK-NEXT: ret 689entry: 690 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 3) 691 ret void 692} 693 694define void @test_vssseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) { 695; CHECK-LABEL: test_vssseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t: 696; CHECK: # %bb.0: # %entry 697; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma 698; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t 699; CHECK-NEXT: ret 700entry: 701 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 3) 702 ret void 703} 704 705declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, i32, i32, i32) 706declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, i32, <vscale x 1 x i1>, i32, i32) 707 708define void @test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 709; CHECK-LABEL: test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t: 710; CHECK: # %bb.0: # %entry 711; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma 712; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 713; CHECK-NEXT: ret 714entry: 715 tail call void 
@llvm.riscv.vssseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 3) 716 ret void 717} 718 719define void @test_vssseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 720; CHECK-LABEL: test_vssseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: 721; CHECK: # %bb.0: # %entry 722; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma 723; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t 724; CHECK-NEXT: ret 725entry: 726 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 3) 727 ret void 728} 729 730declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, i32, i32, i32) 731declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, i32, <vscale x 2 x i1>, i32, i32) 732 733define void @test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 734; CHECK-LABEL: test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t: 735; CHECK: # %bb.0: # %entry 736; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma 737; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 738; CHECK-NEXT: ret 739entry: 740 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 3) 741 ret void 742} 743 744define void @test_vssseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 745; CHECK-LABEL: test_vssseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t: 746; CHECK: # %bb.0: # %entry 747; 
CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma 748; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t 749; CHECK-NEXT: ret 750entry: 751 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 3) 752 ret void 753} 754 755declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, i32, i32, i32) 756declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, i32, <vscale x 4 x i1>, i32, i32) 757 758define void @test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 759; CHECK-LABEL: test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t: 760; CHECK: # %bb.0: # %entry 761; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma 762; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 763; CHECK-NEXT: ret 764entry: 765 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 3) 766 ret void 767} 768 769define void @test_vssseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 770; CHECK-LABEL: test_vssseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t: 771; CHECK: # %bb.0: # %entry 772; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma 773; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t 774; CHECK-NEXT: ret 775entry: 776 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 3) 777 ret void 778} 779 780declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, 
i32, i32, i32) 781declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, i32, <vscale x 8 x i1>, i32, i32) 782 783define void @test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 784; CHECK-LABEL: test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t: 785; CHECK: # %bb.0: # %entry 786; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma 787; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 788; CHECK-NEXT: ret 789entry: 790 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 3) 791 ret void 792} 793 794define void @test_vssseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) { 795; CHECK-LABEL: test_vssseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t: 796; CHECK: # %bb.0: # %entry 797; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma 798; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t 799; CHECK-NEXT: ret 800entry: 801 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 3) 802 ret void 803} 804 805declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, i32, <vscale x 1 x i1>, i32, i32) 806 807define void @test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 808; CHECK-LABEL: test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t: 809; CHECK: # %bb.0: # %entry 810; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 811; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 812; CHECK-NEXT: ret 813entry: 814 tail call void 
@llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 815 ret void 816} 817 818define void @test_vssseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 819; CHECK-LABEL: test_vssseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t: 820; CHECK: # %bb.0: # %entry 821; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 822; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t 823; CHECK-NEXT: ret 824entry: 825 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4) 826 ret void 827} 828 829declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, i32, <vscale x 2 x i1>, i32, i32) 830 831define void @test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 832; CHECK-LABEL: test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t: 833; CHECK: # %bb.0: # %entry 834; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 835; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 836; CHECK-NEXT: ret 837entry: 838 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 839 ret void 840} 841 842define void @test_vssseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 843; CHECK-LABEL: test_vssseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t: 844; CHECK: # %bb.0: # %entry 845; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 846; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t 847; CHECK-NEXT: ret 848entry: 849 
tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4) 850 ret void 851} 852 853declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i32, <vscale x 4 x i1>, i32, i32) 854 855define void @test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 856; CHECK-LABEL: test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t: 857; CHECK: # %bb.0: # %entry 858; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 859; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 860; CHECK-NEXT: ret 861entry: 862 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 863 ret void 864} 865 866define void @test_vssseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 867; CHECK-LABEL: test_vssseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t: 868; CHECK: # %bb.0: # %entry 869; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 870; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t 871; CHECK-NEXT: ret 872entry: 873 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4) 874 ret void 875} 876 877declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, i32, <vscale x 8 x i1>, i32, i32) 878 879define void @test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 880; CHECK-LABEL: 
test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t: 881; CHECK: # %bb.0: # %entry 882; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma 883; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 884; CHECK-NEXT: ret 885entry: 886 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 887 ret void 888} 889 890define void @test_vssseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) { 891; CHECK-LABEL: test_vssseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t: 892; CHECK: # %bb.0: # %entry 893; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma 894; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t 895; CHECK-NEXT: ret 896entry: 897 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 4) 898 ret void 899} 900 901declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, i32, <vscale x 16 x i1>, i32, i32) 902 903define void @test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 904; CHECK-LABEL: test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t: 905; CHECK: # %bb.0: # %entry 906; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma 907; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 908; CHECK-NEXT: ret 909entry: 910 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 911 ret void 912} 913 914define void @test_vssseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) 
{ 915; CHECK-LABEL: test_vssseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t: 916; CHECK: # %bb.0: # %entry 917; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma 918; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t 919; CHECK-NEXT: ret 920entry: 921 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl, i32 4) 922 ret void 923} 924 925declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, i32, <vscale x 1 x i1>, i32, i32) 926 927define void @test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 928; CHECK-LABEL: test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t: 929; CHECK: # %bb.0: # %entry 930; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 931; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 932; CHECK-NEXT: ret 933entry: 934 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 935 ret void 936} 937 938define void @test_vssseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 939; CHECK-LABEL: test_vssseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t: 940; CHECK: # %bb.0: # %entry 941; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 942; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t 943; CHECK-NEXT: ret 944entry: 945 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4) 946 ret void 947} 948 949declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), 
ptr, i32, <vscale x 2 x i1>, i32, i32) 950 951define void @test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 952; CHECK-LABEL: test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t: 953; CHECK: # %bb.0: # %entry 954; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 955; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 956; CHECK-NEXT: ret 957entry: 958 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 959 ret void 960} 961 962define void @test_vssseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 963; CHECK-LABEL: test_vssseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t: 964; CHECK: # %bb.0: # %entry 965; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 966; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t 967; CHECK-NEXT: ret 968entry: 969 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4) 970 ret void 971} 972 973declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, i32, <vscale x 4 x i1>, i32, i32) 974 975define void @test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 976; CHECK-LABEL: test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t: 977; CHECK: # %bb.0: # %entry 978; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 979; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 980; CHECK-NEXT: ret 981entry: 982 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 983 ret 
void 984} 985 986define void @test_vssseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 987; CHECK-LABEL: test_vssseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t: 988; CHECK: # %bb.0: # %entry 989; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 990; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t 991; CHECK-NEXT: ret 992entry: 993 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4) 994 ret void 995} 996 997declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, i32, <vscale x 8 x i1>, i32, i32) 998 999define void @test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 1000; CHECK-LABEL: test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t: 1001; CHECK: # %bb.0: # %entry 1002; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma 1003; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 1004; CHECK-NEXT: ret 1005entry: 1006 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1007 ret void 1008} 1009 1010define void @test_vssseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) { 1011; CHECK-LABEL: test_vssseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t: 1012; CHECK: # %bb.0: # %entry 1013; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma 1014; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t 1015; CHECK-NEXT: ret 1016entry: 1017 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) 
%val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 4) 1018 ret void 1019} 1020 1021declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, i32, <vscale x 1 x i1>, i32, i32) 1022 1023define void @test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 1024; CHECK-LABEL: test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t: 1025; CHECK: # %bb.0: # %entry 1026; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 1027; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 1028; CHECK-NEXT: ret 1029entry: 1030 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1031 ret void 1032} 1033 1034define void @test_vssseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 1035; CHECK-LABEL: test_vssseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t: 1036; CHECK: # %bb.0: # %entry 1037; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 1038; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t 1039; CHECK-NEXT: ret 1040entry: 1041 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4) 1042 ret void 1043} 1044 1045declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, i32, <vscale x 2 x i1>, i32, i32) 1046 1047define void @test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 1048; CHECK-LABEL: test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t: 1049; CHECK: # %bb.0: # %entry 1050; CHECK-NEXT: vsetvli zero, a2, e16, mf2, 
ta, ma 1051; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 1052; CHECK-NEXT: ret 1053entry: 1054 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1055 ret void 1056} 1057 1058define void @test_vssseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 1059; CHECK-LABEL: test_vssseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t: 1060; CHECK: # %bb.0: # %entry 1061; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 1062; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t 1063; CHECK-NEXT: ret 1064entry: 1065 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4) 1066 ret void 1067} 1068 1069declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, i32, <vscale x 4 x i1>, i32, i32) 1070 1071define void @test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 1072; CHECK-LABEL: test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t: 1073; CHECK: # %bb.0: # %entry 1074; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 1075; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 1076; CHECK-NEXT: ret 1077entry: 1078 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1079 ret void 1080} 1081 1082define void @test_vssseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 1083; CHECK-LABEL: test_vssseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t: 1084; CHECK: # %bb.0: # %entry 1085; 
CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 1086; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t 1087; CHECK-NEXT: ret 1088entry: 1089 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4) 1090 ret void 1091} 1092 1093declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, i32, <vscale x 8 x i1>, i32, i32) 1094 1095define void @test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 1096; CHECK-LABEL: test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t: 1097; CHECK: # %bb.0: # %entry 1098; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma 1099; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 1100; CHECK-NEXT: ret 1101entry: 1102 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1103 ret void 1104} 1105 1106define void @test_vssseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) { 1107; CHECK-LABEL: test_vssseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t: 1108; CHECK: # %bb.0: # %entry 1109; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma 1110; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t 1111; CHECK-NEXT: ret 1112entry: 1113 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 4) 1114 ret void 1115} 1116 1117declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, i32, <vscale x 1 x i1>, i32, i32) 1118 1119define void 
@test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) { 1120; CHECK-LABEL: test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t: 1121; CHECK: # %bb.0: # %entry 1122; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 1123; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 1124; CHECK-NEXT: ret 1125entry: 1126 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1127 ret void 1128} 1129 1130define void @test_vssseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 1131; CHECK-LABEL: test_vssseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t: 1132; CHECK: # %bb.0: # %entry 1133; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 1134; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t 1135; CHECK-NEXT: ret 1136entry: 1137 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4) 1138 ret void 1139} 1140 1141declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, i32, <vscale x 2 x i1>, i32, i32) 1142 1143define void @test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) { 1144; CHECK-LABEL: test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t: 1145; CHECK: # %bb.0: # %entry 1146; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 1147; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 1148; CHECK-NEXT: ret 1149entry: 1150 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1151 ret void 1152} 1153 1154define 
void @test_vssseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 1155; CHECK-LABEL: test_vssseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t: 1156; CHECK: # %bb.0: # %entry 1157; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 1158; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t 1159; CHECK-NEXT: ret 1160entry: 1161 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4) 1162 ret void 1163} 1164 1165declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, i32, <vscale x 4 x i1>, i32, i32) 1166 1167define void @test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) { 1168; CHECK-LABEL: test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t: 1169; CHECK: # %bb.0: # %entry 1170; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 1171; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 1172; CHECK-NEXT: ret 1173entry: 1174 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1175 ret void 1176} 1177 1178define void @test_vssseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 1179; CHECK-LABEL: test_vssseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t: 1180; CHECK: # %bb.0: # %entry 1181; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 1182; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t 1183; CHECK-NEXT: ret 1184entry: 1185 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 
%offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4) 1186 ret void 1187} 1188 1189declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, i32, <vscale x 1 x i1>, i32, i32) 1190 1191define void @test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) { 1192; CHECK-LABEL: test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t: 1193; CHECK: # %bb.0: # %entry 1194; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 1195; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 1196; CHECK-NEXT: ret 1197entry: 1198 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1199 ret void 1200} 1201 1202define void @test_vssseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 1203; CHECK-LABEL: test_vssseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t: 1204; CHECK: # %bb.0: # %entry 1205; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 1206; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t 1207; CHECK-NEXT: ret 1208entry: 1209 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4) 1210 ret void 1211} 1212 1213declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, i32, <vscale x 2 x i1>, i32, i32) 1214 1215define void @test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) { 1216; CHECK-LABEL: test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t: 1217; CHECK: # %bb.0: # %entry 1218; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 1219; 
CHECK-NEXT: vssseg6e16.v v8, (a0), a1 1220; CHECK-NEXT: ret 1221entry: 1222 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1223 ret void 1224} 1225 1226define void @test_vssseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 1227; CHECK-LABEL: test_vssseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t: 1228; CHECK: # %bb.0: # %entry 1229; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 1230; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t 1231; CHECK-NEXT: ret 1232entry: 1233 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4) 1234 ret void 1235} 1236 1237declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, i32, <vscale x 4 x i1>, i32, i32) 1238 1239define void @test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) { 1240; CHECK-LABEL: test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t: 1241; CHECK: # %bb.0: # %entry 1242; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 1243; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 1244; CHECK-NEXT: ret 1245entry: 1246 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1247 ret void 1248} 1249 1250define void @test_vssseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 1251; CHECK-LABEL: test_vssseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t: 1252; CHECK: # %bb.0: # %entry 1253; CHECK-NEXT: 
vsetvli zero, a2, e16, m1, ta, ma 1254; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t 1255; CHECK-NEXT: ret 1256entry: 1257 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4) 1258 ret void 1259} 1260 1261declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, i32, <vscale x 1 x i1>, i32, i32) 1262 1263define void @test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 1264; CHECK-LABEL: test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t: 1265; CHECK: # %bb.0: # %entry 1266; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 1267; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 1268; CHECK-NEXT: ret 1269entry: 1270 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1271 ret void 1272} 1273 1274define void @test_vssseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 1275; CHECK-LABEL: test_vssseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t: 1276; CHECK: # %bb.0: # %entry 1277; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 1278; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t 1279; CHECK-NEXT: ret 1280entry: 1281 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4) 1282 ret void 1283} 1284 1285declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, i32, <vscale x 2 x i1>, i32, i32) 1286 1287define void 
@test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 1288; CHECK-LABEL: test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t: 1289; CHECK: # %bb.0: # %entry 1290; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 1291; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 1292; CHECK-NEXT: ret 1293entry: 1294 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1295 ret void 1296} 1297 1298define void @test_vssseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 1299; CHECK-LABEL: test_vssseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t: 1300; CHECK: # %bb.0: # %entry 1301; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 1302; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t 1303; CHECK-NEXT: ret 1304entry: 1305 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4) 1306 ret void 1307} 1308 1309declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, i32, <vscale x 4 x i1>, i32, i32) 1310 1311define void @test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 1312; CHECK-LABEL: test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t: 1313; CHECK: # %bb.0: # %entry 1314; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 1315; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 1316; CHECK-NEXT: ret 1317entry: 1318 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1319 ret void 1320} 1321 1322define 
void @test_vssseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 1323; CHECK-LABEL: test_vssseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t: 1324; CHECK: # %bb.0: # %entry 1325; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 1326; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t 1327; CHECK-NEXT: ret 1328entry: 1329 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4) 1330 ret void 1331} 1332 1333declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, i32, <vscale x 1 x i1>, i32, i32) 1334 1335define void @test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 1336; CHECK-LABEL: test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t: 1337; CHECK: # %bb.0: # %entry 1338; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 1339; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 1340; CHECK-NEXT: ret 1341entry: 1342 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1343 ret void 1344} 1345 1346define void @test_vssseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 1347; CHECK-LABEL: test_vssseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t: 1348; CHECK: # %bb.0: # %entry 1349; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 1350; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t 1351; CHECK-NEXT: ret 1352entry: 1353 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 
%offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4) 1354 ret void 1355} 1356 1357declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, i32, <vscale x 2 x i1>, i32, i32) 1358 1359define void @test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 1360; CHECK-LABEL: test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t: 1361; CHECK: # %bb.0: # %entry 1362; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 1363; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 1364; CHECK-NEXT: ret 1365entry: 1366 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1367 ret void 1368} 1369 1370define void @test_vssseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 1371; CHECK-LABEL: test_vssseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t: 1372; CHECK: # %bb.0: # %entry 1373; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 1374; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t 1375; CHECK-NEXT: ret 1376entry: 1377 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4) 1378 ret void 1379} 1380 1381declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, i32, <vscale x 4 x i1>, i32, i32) 1382 1383define void @test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 1384; CHECK-LABEL: test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t: 1385; CHECK: # %bb.0: # %entry 1386; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 1387; 
CHECK-NEXT: vssseg8e16.v v8, (a0), a1 1388; CHECK-NEXT: ret 1389entry: 1390 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 1391 ret void 1392} 1393 1394define void @test_vssseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 1395; CHECK-LABEL: test_vssseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t: 1396; CHECK: # %bb.0: # %entry 1397; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 1398; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t 1399; CHECK-NEXT: ret 1400entry: 1401 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4) 1402 ret void 1403} 1404 1405declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, i32, <vscale x 1 x i1>, i32, i32) 1406 1407define void @test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 1408; CHECK-LABEL: test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t: 1409; CHECK: # %bb.0: # %entry 1410; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 1411; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 1412; CHECK-NEXT: ret 1413entry: 1414 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1415 ret void 1416} 1417 1418define void @test_vssseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 1419; CHECK-LABEL: test_vssseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t: 1420; CHECK: # %bb.0: # %entry 1421; CHECK-NEXT: 
vsetvli zero, a2, e32, mf2, ta, ma 1422; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t 1423; CHECK-NEXT: ret 1424entry: 1425 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 5) 1426 ret void 1427} 1428 1429declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i32, <vscale x 2 x i1>, i32, i32) 1430 1431define void @test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 1432; CHECK-LABEL: test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t: 1433; CHECK: # %bb.0: # %entry 1434; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 1435; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 1436; CHECK-NEXT: ret 1437entry: 1438 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1439 ret void 1440} 1441 1442define void @test_vssseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 1443; CHECK-LABEL: test_vssseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t: 1444; CHECK: # %bb.0: # %entry 1445; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 1446; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t 1447; CHECK-NEXT: ret 1448entry: 1449 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 5) 1450 ret void 1451} 1452 1453declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, i32, <vscale x 4 x i1>, i32, i32) 1454 1455define void 
@test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 1456; CHECK-LABEL: test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t: 1457; CHECK: # %bb.0: # %entry 1458; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma 1459; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 1460; CHECK-NEXT: ret 1461entry: 1462 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1463 ret void 1464} 1465 1466define void @test_vssseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 1467; CHECK-LABEL: test_vssseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t: 1468; CHECK: # %bb.0: # %entry 1469; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma 1470; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t 1471; CHECK-NEXT: ret 1472entry: 1473 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 5) 1474 ret void 1475} 1476 1477declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, i32, <vscale x 8 x i1>, i32, i32) 1478 1479define void @test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 1480; CHECK-LABEL: test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t: 1481; CHECK: # %bb.0: # %entry 1482; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma 1483; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 1484; CHECK-NEXT: ret 1485entry: 1486 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1487 ret void 1488} 
1489 1490define void @test_vssseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) { 1491; CHECK-LABEL: test_vssseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t: 1492; CHECK: # %bb.0: # %entry 1493; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma 1494; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t 1495; CHECK-NEXT: ret 1496entry: 1497 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 5) 1498 ret void 1499} 1500 1501declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, i32, <vscale x 1 x i1>, i32, i32) 1502 1503define void @test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 1504; CHECK-LABEL: test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t: 1505; CHECK: # %bb.0: # %entry 1506; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 1507; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 1508; CHECK-NEXT: ret 1509entry: 1510 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1511 ret void 1512} 1513 1514define void @test_vssseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 1515; CHECK-LABEL: test_vssseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t: 1516; CHECK: # %bb.0: # %entry 1517; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 1518; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t 1519; CHECK-NEXT: ret 1520entry: 1521 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) 
%val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 5) 1522 ret void 1523} 1524 1525declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, i32, <vscale x 2 x i1>, i32, i32) 1526 1527define void @test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 1528; CHECK-LABEL: test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t: 1529; CHECK: # %bb.0: # %entry 1530; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 1531; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 1532; CHECK-NEXT: ret 1533entry: 1534 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1535 ret void 1536} 1537 1538define void @test_vssseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 1539; CHECK-LABEL: test_vssseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t: 1540; CHECK: # %bb.0: # %entry 1541; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 1542; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t 1543; CHECK-NEXT: ret 1544entry: 1545 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 5) 1546 ret void 1547} 1548 1549declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, i32, <vscale x 4 x i1>, i32, i32) 1550 1551define void @test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 1552; CHECK-LABEL: test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t: 1553; CHECK: # %bb.0: # %entry 1554; CHECK-NEXT: vsetvli zero, a2, e32, 
m2, ta, ma 1555; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 1556; CHECK-NEXT: ret 1557entry: 1558 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1559 ret void 1560} 1561 1562define void @test_vssseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 1563; CHECK-LABEL: test_vssseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t: 1564; CHECK: # %bb.0: # %entry 1565; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma 1566; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t 1567; CHECK-NEXT: ret 1568entry: 1569 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 5) 1570 ret void 1571} 1572 1573declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, i32, <vscale x 1 x i1>, i32, i32) 1574 1575define void @test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 1576; CHECK-LABEL: test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t: 1577; CHECK: # %bb.0: # %entry 1578; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 1579; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 1580; CHECK-NEXT: ret 1581entry: 1582 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1583 ret void 1584} 1585 1586define void @test_vssseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 1587; CHECK-LABEL: test_vssseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t: 1588; CHECK: # %bb.0: # 
%entry 1589; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 1590; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t 1591; CHECK-NEXT: ret 1592entry: 1593 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 5) 1594 ret void 1595} 1596 1597declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, i32, <vscale x 2 x i1>, i32, i32) 1598 1599define void @test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 1600; CHECK-LABEL: test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t: 1601; CHECK: # %bb.0: # %entry 1602; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 1603; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 1604; CHECK-NEXT: ret 1605entry: 1606 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1607 ret void 1608} 1609 1610define void @test_vssseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 1611; CHECK-LABEL: test_vssseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t: 1612; CHECK: # %bb.0: # %entry 1613; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 1614; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t 1615; CHECK-NEXT: ret 1616entry: 1617 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 5) 1618 ret void 1619} 1620 1621declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, i32, <vscale x 4 x i1>, i32, i32) 1622 1623define void 
@test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 1624; CHECK-LABEL: test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t: 1625; CHECK: # %bb.0: # %entry 1626; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma 1627; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 1628; CHECK-NEXT: ret 1629entry: 1630 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1631 ret void 1632} 1633 1634define void @test_vssseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 1635; CHECK-LABEL: test_vssseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t: 1636; CHECK: # %bb.0: # %entry 1637; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma 1638; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t 1639; CHECK-NEXT: ret 1640entry: 1641 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 5) 1642 ret void 1643} 1644 1645declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, i32, <vscale x 1 x i1>, i32, i32) 1646 1647define void @test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) { 1648; CHECK-LABEL: test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t: 1649; CHECK: # %bb.0: # %entry 1650; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 1651; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 1652; CHECK-NEXT: ret 1653entry: 1654 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1655 ret void 1656} 1657 
1658define void @test_vssseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 1659; CHECK-LABEL: test_vssseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t: 1660; CHECK: # %bb.0: # %entry 1661; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 1662; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t 1663; CHECK-NEXT: ret 1664entry: 1665 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 5) 1666 ret void 1667} 1668 1669declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, i32, <vscale x 2 x i1>, i32, i32) 1670 1671define void @test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) { 1672; CHECK-LABEL: test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t: 1673; CHECK: # %bb.0: # %entry 1674; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 1675; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 1676; CHECK-NEXT: ret 1677entry: 1678 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1679 ret void 1680} 1681 1682define void @test_vssseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 1683; CHECK-LABEL: test_vssseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t: 1684; CHECK: # %bb.0: # %entry 1685; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 1686; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t 1687; CHECK-NEXT: ret 1688entry: 1689 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr 
%base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 5) 1690 ret void 1691} 1692 1693declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, i32, <vscale x 1 x i1>, i32, i32) 1694 1695define void @test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) { 1696; CHECK-LABEL: test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t: 1697; CHECK: # %bb.0: # %entry 1698; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 1699; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 1700; CHECK-NEXT: ret 1701entry: 1702 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1703 ret void 1704} 1705 1706define void @test_vssseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 1707; CHECK-LABEL: test_vssseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t: 1708; CHECK: # %bb.0: # %entry 1709; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 1710; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t 1711; CHECK-NEXT: ret 1712entry: 1713 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 5) 1714 ret void 1715} 1716 1717declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, i32, <vscale x 2 x i1>, i32, i32) 1718 1719define void @test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) { 1720; CHECK-LABEL: test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t: 1721; CHECK: # %bb.0: # %entry 1722; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 
1723; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 1724; CHECK-NEXT: ret 1725entry: 1726 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1727 ret void 1728} 1729 1730define void @test_vssseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 1731; CHECK-LABEL: test_vssseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t: 1732; CHECK: # %bb.0: # %entry 1733; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 1734; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t 1735; CHECK-NEXT: ret 1736entry: 1737 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 5) 1738 ret void 1739} 1740 1741declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, i32, <vscale x 1 x i1>, i32, i32) 1742 1743define void @test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 1744; CHECK-LABEL: test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t: 1745; CHECK: # %bb.0: # %entry 1746; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 1747; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 1748; CHECK-NEXT: ret 1749entry: 1750 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1751 ret void 1752} 1753 1754define void @test_vssseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 1755; CHECK-LABEL: test_vssseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t: 1756; CHECK: # %bb.0: # %entry 1757; 
CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 1758; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t 1759; CHECK-NEXT: ret 1760entry: 1761 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 5) 1762 ret void 1763} 1764 1765declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, i32, <vscale x 2 x i1>, i32, i32) 1766 1767define void @test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 1768; CHECK-LABEL: test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t: 1769; CHECK: # %bb.0: # %entry 1770; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 1771; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 1772; CHECK-NEXT: ret 1773entry: 1774 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1775 ret void 1776} 1777 1778define void @test_vssseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 1779; CHECK-LABEL: test_vssseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t: 1780; CHECK: # %bb.0: # %entry 1781; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 1782; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t 1783; CHECK-NEXT: ret 1784entry: 1785 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 5) 1786 ret void 1787} 1788 1789declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, i32, <vscale x 1 x i1>, i32, i32) 1790 1791define void 
@test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 1792; CHECK-LABEL: test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t: 1793; CHECK: # %bb.0: # %entry 1794; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 1795; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 1796; CHECK-NEXT: ret 1797entry: 1798 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1799 ret void 1800} 1801 1802define void @test_vssseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 1803; CHECK-LABEL: test_vssseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t: 1804; CHECK: # %bb.0: # %entry 1805; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 1806; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t 1807; CHECK-NEXT: ret 1808entry: 1809 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 5) 1810 ret void 1811} 1812 1813declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, i32, <vscale x 2 x i1>, i32, i32) 1814 1815define void @test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 1816; CHECK-LABEL: test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t: 1817; CHECK: # %bb.0: # %entry 1818; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 1819; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 1820; CHECK-NEXT: ret 1821entry: 1822 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 1823 ret void 1824} 1825 1826define 
void @test_vssseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 1827; CHECK-LABEL: test_vssseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t: 1828; CHECK: # %bb.0: # %entry 1829; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 1830; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t 1831; CHECK-NEXT: ret 1832entry: 1833 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 5) 1834 ret void 1835} 1836 1837declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i32, <vscale x 1 x i1>, i32, i32) 1838 1839define void @test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 1840; CHECK-LABEL: test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t: 1841; CHECK: # %bb.0: # %entry 1842; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 1843; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 1844; CHECK-NEXT: ret 1845entry: 1846 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 1847 ret void 1848} 1849 1850define void @test_vssseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 1851; CHECK-LABEL: test_vssseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t: 1852; CHECK: # %bb.0: # %entry 1853; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 1854; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t 1855; CHECK-NEXT: ret 1856entry: 1857 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 
%offset, <vscale x 1 x i1> %mask, i32 %vl, i32 6) 1858 ret void 1859} 1860 1861declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, i32, <vscale x 2 x i1>, i32, i32) 1862 1863define void @test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 1864; CHECK-LABEL: test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t: 1865; CHECK: # %bb.0: # %entry 1866; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma 1867; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 1868; CHECK-NEXT: ret 1869entry: 1870 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 1871 ret void 1872} 1873 1874define void @test_vssseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 1875; CHECK-LABEL: test_vssseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t: 1876; CHECK: # %bb.0: # %entry 1877; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma 1878; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t 1879; CHECK-NEXT: ret 1880entry: 1881 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 6) 1882 ret void 1883} 1884 1885declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, i32, <vscale x 4 x i1>, i32, i32) 1886 1887define void @test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 1888; CHECK-LABEL: test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t: 1889; CHECK: # %bb.0: # %entry 1890; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma 
1891; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 1892; CHECK-NEXT: ret 1893entry: 1894 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 1895 ret void 1896} 1897 1898define void @test_vssseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 1899; CHECK-LABEL: test_vssseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t: 1900; CHECK: # %bb.0: # %entry 1901; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma 1902; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t 1903; CHECK-NEXT: ret 1904entry: 1905 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 6) 1906 ret void 1907} 1908 1909declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, i32, <vscale x 1 x i1>, i32, i32) 1910 1911define void @test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 1912; CHECK-LABEL: test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t: 1913; CHECK: # %bb.0: # %entry 1914; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 1915; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 1916; CHECK-NEXT: ret 1917entry: 1918 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 1919 ret void 1920} 1921 1922define void @test_vssseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 1923; CHECK-LABEL: test_vssseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t: 1924; CHECK: # %bb.0: # %entry 1925; 
CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 1926; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t 1927; CHECK-NEXT: ret 1928entry: 1929 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 6) 1930 ret void 1931} 1932 1933declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, i32, <vscale x 2 x i1>, i32, i32) 1934 1935define void @test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 1936; CHECK-LABEL: test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t: 1937; CHECK: # %bb.0: # %entry 1938; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma 1939; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 1940; CHECK-NEXT: ret 1941entry: 1942 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 1943 ret void 1944} 1945 1946define void @test_vssseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 1947; CHECK-LABEL: test_vssseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t: 1948; CHECK: # %bb.0: # %entry 1949; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma 1950; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t 1951; CHECK-NEXT: ret 1952entry: 1953 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 6) 1954 ret void 1955} 1956 1957declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, i32, <vscale x 1 x i1>, i32, i32) 1958 1959define void 
@test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 1960; CHECK-LABEL: test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t: 1961; CHECK: # %bb.0: # %entry 1962; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 1963; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 1964; CHECK-NEXT: ret 1965entry: 1966 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 1967 ret void 1968} 1969 1970define void @test_vssseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 1971; CHECK-LABEL: test_vssseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t: 1972; CHECK: # %bb.0: # %entry 1973; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 1974; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t 1975; CHECK-NEXT: ret 1976entry: 1977 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 6) 1978 ret void 1979} 1980 1981declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, i32, <vscale x 2 x i1>, i32, i32) 1982 1983define void @test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 1984; CHECK-LABEL: test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t: 1985; CHECK: # %bb.0: # %entry 1986; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma 1987; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 1988; CHECK-NEXT: ret 1989entry: 1990 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 1991 ret void 1992} 1993 
1994define void @test_vssseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 1995; CHECK-LABEL: test_vssseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t: 1996; CHECK: # %bb.0: # %entry 1997; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma 1998; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t 1999; CHECK-NEXT: ret 2000entry: 2001 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 6) 2002 ret void 2003} 2004 2005declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, i32, <vscale x 1 x i1>, i32, i32) 2006 2007define void @test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) { 2008; CHECK-LABEL: test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t: 2009; CHECK: # %bb.0: # %entry 2010; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 2011; CHECK-NEXT: vssseg5e64.v v8, (a0), a1 2012; CHECK-NEXT: ret 2013entry: 2014 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 2015 ret void 2016} 2017 2018define void @test_vssseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2019; CHECK-LABEL: test_vssseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t: 2020; CHECK: # %bb.0: # %entry 2021; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 2022; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t 2023; CHECK-NEXT: ret 2024entry: 2025 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr 
%base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 6) 2026 ret void 2027} 2028 2029declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, i32, <vscale x 1 x i1>, i32, i32) 2030 2031define void @test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) { 2032; CHECK-LABEL: test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t: 2033; CHECK: # %bb.0: # %entry 2034; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 2035; CHECK-NEXT: vssseg6e64.v v8, (a0), a1 2036; CHECK-NEXT: ret 2037entry: 2038 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 2039 ret void 2040} 2041 2042define void @test_vssseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2043; CHECK-LABEL: test_vssseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t: 2044; CHECK: # %bb.0: # %entry 2045; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 2046; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t 2047; CHECK-NEXT: ret 2048entry: 2049 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 6) 2050 ret void 2051} 2052 2053declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, i32, <vscale x 1 x i1>, i32, i32) 2054 2055define void @test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 2056; CHECK-LABEL: test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t: 2057; CHECK: # %bb.0: # %entry 2058; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 2059; 
CHECK-NEXT: vssseg7e64.v v8, (a0), a1 2060; CHECK-NEXT: ret 2061entry: 2062 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 2063 ret void 2064} 2065 2066define void @test_vssseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2067; CHECK-LABEL: test_vssseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t: 2068; CHECK: # %bb.0: # %entry 2069; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 2070; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t 2071; CHECK-NEXT: ret 2072entry: 2073 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 6) 2074 ret void 2075} 2076 2077declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, i32, <vscale x 1 x i1>, i32, i32) 2078 2079define void @test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 2080; CHECK-LABEL: test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t: 2081; CHECK: # %bb.0: # %entry 2082; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 2083; CHECK-NEXT: vssseg8e64.v v8, (a0), a1 2084; CHECK-NEXT: ret 2085entry: 2086 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 2087 ret void 2088} 2089 2090define void @test_vssseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2091; CHECK-LABEL: test_vssseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t: 2092; CHECK: # %bb.0: # %entry 2093; CHECK-NEXT: 
vsetvli zero, a2, e64, m1, ta, ma 2094; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t 2095; CHECK-NEXT: ret 2096entry: 2097 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 6) 2098 ret void 2099} 2100 2101 2102define void @test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 2103; CHECK-LABEL: test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t: 2104; CHECK: # %bb.0: # %entry 2105; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 2106; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 2107; CHECK-NEXT: ret 2108entry: 2109 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2110 ret void 2111} 2112 2113define void @test_vssseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2114; CHECK-LABEL: test_vssseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t: 2115; CHECK: # %bb.0: # %entry 2116; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 2117; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t 2118; CHECK-NEXT: ret 2119entry: 2120 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4) 2121 ret void 2122} 2123 2124 2125define void @test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 2126; CHECK-LABEL: test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t: 2127; CHECK: # %bb.0: # %entry 2128; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 2129; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 2130; CHECK-NEXT: ret 2131entry: 2132 
tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2133 ret void 2134} 2135 2136define void @test_vssseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 2137; CHECK-LABEL: test_vssseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t: 2138; CHECK: # %bb.0: # %entry 2139; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 2140; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t 2141; CHECK-NEXT: ret 2142entry: 2143 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4) 2144 ret void 2145} 2146 2147 2148define void @test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 2149; CHECK-LABEL: test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t: 2150; CHECK: # %bb.0: # %entry 2151; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 2152; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 2153; CHECK-NEXT: ret 2154entry: 2155 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2156 ret void 2157} 2158 2159define void @test_vssseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 2160; CHECK-LABEL: test_vssseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t: 2161; CHECK: # %bb.0: # %entry 2162; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 2163; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t 2164; CHECK-NEXT: ret 2165entry: 2166 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 
2) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4) 2167 ret void 2168} 2169 2170 2171define void @test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 2172; CHECK-LABEL: test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t: 2173; CHECK: # %bb.0: # %entry 2174; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma 2175; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 2176; CHECK-NEXT: ret 2177entry: 2178 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2179 ret void 2180} 2181 2182define void @test_vssseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) { 2183; CHECK-LABEL: test_vssseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t: 2184; CHECK: # %bb.0: # %entry 2185; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma 2186; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t 2187; CHECK-NEXT: ret 2188entry: 2189 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 4) 2190 ret void 2191} 2192 2193 2194define void @test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 2195; CHECK-LABEL: test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t: 2196; CHECK: # %bb.0: # %entry 2197; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma 2198; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 2199; CHECK-NEXT: ret 2200entry: 2201 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2202 ret void 2203} 2204 2205define void 
@test_vssseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) { 2206; CHECK-LABEL: test_vssseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t: 2207; CHECK: # %bb.0: # %entry 2208; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma 2209; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t 2210; CHECK-NEXT: ret 2211entry: 2212 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl, i32 4) 2213 ret void 2214} 2215 2216 2217define void @test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 2218; CHECK-LABEL: test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t: 2219; CHECK: # %bb.0: # %entry 2220; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 2221; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 2222; CHECK-NEXT: ret 2223entry: 2224 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2225 ret void 2226} 2227 2228define void @test_vssseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2229; CHECK-LABEL: test_vssseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t: 2230; CHECK: # %bb.0: # %entry 2231; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 2232; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t 2233; CHECK-NEXT: ret 2234entry: 2235 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4) 2236 ret void 2237} 2238 2239 2240define void 
@test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 2241; CHECK-LABEL: test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t: 2242; CHECK: # %bb.0: # %entry 2243; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 2244; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 2245; CHECK-NEXT: ret 2246entry: 2247 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2248 ret void 2249} 2250 2251define void @test_vssseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 2252; CHECK-LABEL: test_vssseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t: 2253; CHECK: # %bb.0: # %entry 2254; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 2255; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t 2256; CHECK-NEXT: ret 2257entry: 2258 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4) 2259 ret void 2260} 2261 2262 2263define void @test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 2264; CHECK-LABEL: test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t: 2265; CHECK: # %bb.0: # %entry 2266; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 2267; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 2268; CHECK-NEXT: ret 2269entry: 2270 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2271 ret void 2272} 2273 2274define void @test_vssseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> 
%mask) { 2275; CHECK-LABEL: test_vssseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t: 2276; CHECK: # %bb.0: # %entry 2277; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 2278; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t 2279; CHECK-NEXT: ret 2280entry: 2281 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4) 2282 ret void 2283} 2284 2285 2286define void @test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 2287; CHECK-LABEL: test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t: 2288; CHECK: # %bb.0: # %entry 2289; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma 2290; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 2291; CHECK-NEXT: ret 2292entry: 2293 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2294 ret void 2295} 2296 2297define void @test_vssseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) { 2298; CHECK-LABEL: test_vssseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t: 2299; CHECK: # %bb.0: # %entry 2300; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma 2301; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t 2302; CHECK-NEXT: ret 2303entry: 2304 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 4) 2305 ret void 2306} 2307 2308 2309define void @test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 2310; CHECK-LABEL: test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t: 2311; CHECK: # %bb.0: # 
%entry 2312; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 2313; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 2314; CHECK-NEXT: ret 2315entry: 2316 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2317 ret void 2318} 2319 2320define void @test_vssseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2321; CHECK-LABEL: test_vssseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t: 2322; CHECK: # %bb.0: # %entry 2323; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 2324; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t 2325; CHECK-NEXT: ret 2326entry: 2327 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4) 2328 ret void 2329} 2330 2331 2332define void @test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 2333; CHECK-LABEL: test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t: 2334; CHECK: # %bb.0: # %entry 2335; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 2336; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 2337; CHECK-NEXT: ret 2338entry: 2339 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2340 ret void 2341} 2342 2343define void @test_vssseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 2344; CHECK-LABEL: test_vssseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t: 2345; CHECK: # %bb.0: # %entry 2346; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 2347; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t 2348; CHECK-NEXT: 
ret 2349entry: 2350 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4) 2351 ret void 2352} 2353 2354 2355define void @test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 2356; CHECK-LABEL: test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t: 2357; CHECK: # %bb.0: # %entry 2358; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 2359; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 2360; CHECK-NEXT: ret 2361entry: 2362 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2363 ret void 2364} 2365 2366define void @test_vssseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 2367; CHECK-LABEL: test_vssseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t: 2368; CHECK: # %bb.0: # %entry 2369; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 2370; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t 2371; CHECK-NEXT: ret 2372entry: 2373 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4) 2374 ret void 2375} 2376 2377 2378define void @test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 2379; CHECK-LABEL: test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t: 2380; CHECK: # %bb.0: # %entry 2381; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma 2382; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 2383; CHECK-NEXT: ret 2384entry: 2385 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale 
x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2386 ret void 2387} 2388 2389define void @test_vssseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) { 2390; CHECK-LABEL: test_vssseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t: 2391; CHECK: # %bb.0: # %entry 2392; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma 2393; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t 2394; CHECK-NEXT: ret 2395entry: 2396 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 4) 2397 ret void 2398} 2399 2400 2401define void @test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) { 2402; CHECK-LABEL: test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t: 2403; CHECK: # %bb.0: # %entry 2404; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 2405; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 2406; CHECK-NEXT: ret 2407entry: 2408 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2409 ret void 2410} 2411 2412define void @test_vssseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2413; CHECK-LABEL: test_vssseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t: 2414; CHECK: # %bb.0: # %entry 2415; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 2416; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t 2417; CHECK-NEXT: ret 2418entry: 2419 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4) 2420 ret void 2421} 
2422 2423 2424define void @test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) { 2425; CHECK-LABEL: test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t: 2426; CHECK: # %bb.0: # %entry 2427; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 2428; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 2429; CHECK-NEXT: ret 2430entry: 2431 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2432 ret void 2433} 2434 2435define void @test_vssseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 2436; CHECK-LABEL: test_vssseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t: 2437; CHECK: # %bb.0: # %entry 2438; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 2439; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t 2440; CHECK-NEXT: ret 2441entry: 2442 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4) 2443 ret void 2444} 2445 2446 2447define void @test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) { 2448; CHECK-LABEL: test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t: 2449; CHECK: # %bb.0: # %entry 2450; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 2451; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 2452; CHECK-NEXT: ret 2453entry: 2454 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2455 ret void 2456} 2457 2458define void @test_vssseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, 
i32 %vl, <vscale x 4 x i1> %mask) { 2459; CHECK-LABEL: test_vssseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t: 2460; CHECK: # %bb.0: # %entry 2461; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 2462; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t 2463; CHECK-NEXT: ret 2464entry: 2465 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4) 2466 ret void 2467} 2468 2469 2470define void @test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) { 2471; CHECK-LABEL: test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t: 2472; CHECK: # %bb.0: # %entry 2473; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 2474; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 2475; CHECK-NEXT: ret 2476entry: 2477 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2478 ret void 2479} 2480 2481define void @test_vssseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2482; CHECK-LABEL: test_vssseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t: 2483; CHECK: # %bb.0: # %entry 2484; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 2485; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t 2486; CHECK-NEXT: ret 2487entry: 2488 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4) 2489 ret void 2490} 2491 2492 2493define void @test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) { 2494; CHECK-LABEL: test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t: 2495; 
CHECK: # %bb.0: # %entry 2496; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 2497; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 2498; CHECK-NEXT: ret 2499entry: 2500 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2501 ret void 2502} 2503 2504define void @test_vssseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 2505; CHECK-LABEL: test_vssseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t: 2506; CHECK: # %bb.0: # %entry 2507; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 2508; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t 2509; CHECK-NEXT: ret 2510entry: 2511 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4) 2512 ret void 2513} 2514 2515 2516define void @test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) { 2517; CHECK-LABEL: test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t: 2518; CHECK: # %bb.0: # %entry 2519; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 2520; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 2521; CHECK-NEXT: ret 2522entry: 2523 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2524 ret void 2525} 2526 2527define void @test_vssseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 2528; CHECK-LABEL: test_vssseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t: 2529; CHECK: # %bb.0: # %entry 2530; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 2531; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t 
2532; CHECK-NEXT: ret 2533entry: 2534 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4) 2535 ret void 2536} 2537 2538 2539define void @test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 2540; CHECK-LABEL: test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t: 2541; CHECK: # %bb.0: # %entry 2542; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 2543; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 2544; CHECK-NEXT: ret 2545entry: 2546 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2547 ret void 2548} 2549 2550define void @test_vssseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2551; CHECK-LABEL: test_vssseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t: 2552; CHECK: # %bb.0: # %entry 2553; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 2554; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t 2555; CHECK-NEXT: ret 2556entry: 2557 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4) 2558 ret void 2559} 2560 2561 2562define void @test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 2563; CHECK-LABEL: test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t: 2564; CHECK: # %bb.0: # %entry 2565; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 2566; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 2567; CHECK-NEXT: ret 2568entry: 2569 tail call void 
@llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2570 ret void 2571} 2572 2573define void @test_vssseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 2574; CHECK-LABEL: test_vssseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t: 2575; CHECK: # %bb.0: # %entry 2576; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 2577; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t 2578; CHECK-NEXT: ret 2579entry: 2580 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4) 2581 ret void 2582} 2583 2584 2585define void @test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 2586; CHECK-LABEL: test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t: 2587; CHECK: # %bb.0: # %entry 2588; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 2589; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 2590; CHECK-NEXT: ret 2591entry: 2592 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2593 ret void 2594} 2595 2596define void @test_vssseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 2597; CHECK-LABEL: test_vssseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t: 2598; CHECK: # %bb.0: # %entry 2599; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 2600; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t 2601; CHECK-NEXT: ret 2602entry: 2603 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr 
%base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4) 2604 ret void 2605} 2606 2607 2608define void @test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 2609; CHECK-LABEL: test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t: 2610; CHECK: # %bb.0: # %entry 2611; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 2612; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 2613; CHECK-NEXT: ret 2614entry: 2615 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2616 ret void 2617} 2618 2619define void @test_vssseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2620; CHECK-LABEL: test_vssseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t: 2621; CHECK: # %bb.0: # %entry 2622; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 2623; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t 2624; CHECK-NEXT: ret 2625entry: 2626 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4) 2627 ret void 2628} 2629 2630 2631define void @test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 2632; CHECK-LABEL: test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t: 2633; CHECK: # %bb.0: # %entry 2634; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 2635; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 2636; CHECK-NEXT: ret 2637entry: 2638 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2639 ret void 2640} 2641 2642define void 
@test_vssseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 2643; CHECK-LABEL: test_vssseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t: 2644; CHECK: # %bb.0: # %entry 2645; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma 2646; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t 2647; CHECK-NEXT: ret 2648entry: 2649 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4) 2650 ret void 2651} 2652 2653 2654define void @test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 2655; CHECK-LABEL: test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t: 2656; CHECK: # %bb.0: # %entry 2657; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 2658; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 2659; CHECK-NEXT: ret 2660entry: 2661 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 2662 ret void 2663} 2664 2665define void @test_vssseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 2666; CHECK-LABEL: test_vssseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t: 2667; CHECK: # %bb.0: # %entry 2668; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma 2669; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t 2670; CHECK-NEXT: ret 2671entry: 2672 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4) 2673 ret void 2674} 2675 2676 2677define void @test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", 
<vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 2678; CHECK-LABEL: test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t: 2679; CHECK: # %bb.0: # %entry 2680; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 2681; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 2682; CHECK-NEXT: ret 2683entry: 2684 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 2685 ret void 2686} 2687 2688define void @test_vssseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2689; CHECK-LABEL: test_vssseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t: 2690; CHECK: # %bb.0: # %entry 2691; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 2692; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t 2693; CHECK-NEXT: ret 2694entry: 2695 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 5) 2696 ret void 2697} 2698 2699 2700define void @test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 2701; CHECK-LABEL: test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t: 2702; CHECK: # %bb.0: # %entry 2703; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 2704; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 2705; CHECK-NEXT: ret 2706entry: 2707 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 2708 ret void 2709} 2710 2711define void @test_vssseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 2712; CHECK-LABEL: 
test_vssseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t: 2713; CHECK: # %bb.0: # %entry 2714; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 2715; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t 2716; CHECK-NEXT: ret 2717entry: 2718 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 5) 2719 ret void 2720} 2721 2722 2723define void @test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 2724; CHECK-LABEL: test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t: 2725; CHECK: # %bb.0: # %entry 2726; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma 2727; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 2728; CHECK-NEXT: ret 2729entry: 2730 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 2731 ret void 2732} 2733 2734define void @test_vssseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 2735; CHECK-LABEL: test_vssseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t: 2736; CHECK: # %bb.0: # %entry 2737; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma 2738; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t 2739; CHECK-NEXT: ret 2740entry: 2741 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 5) 2742 ret void 2743} 2744 2745 2746define void @test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 2747; CHECK-LABEL: test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t: 2748; CHECK: # %bb.0: # %entry 2749; CHECK-NEXT: 
vsetvli zero, a2, e32, m4, ta, ma 2750; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 2751; CHECK-NEXT: ret 2752entry: 2753 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 2754 ret void 2755} 2756 2757define void @test_vssseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) { 2758; CHECK-LABEL: test_vssseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t: 2759; CHECK: # %bb.0: # %entry 2760; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma 2761; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t 2762; CHECK-NEXT: ret 2763entry: 2764 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 5) 2765 ret void 2766} 2767 2768 2769define void @test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 2770; CHECK-LABEL: test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t: 2771; CHECK: # %bb.0: # %entry 2772; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 2773; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 2774; CHECK-NEXT: ret 2775entry: 2776 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 2777 ret void 2778} 2779 2780define void @test_vssseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2781; CHECK-LABEL: test_vssseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t: 2782; CHECK: # %bb.0: # %entry 2783; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 2784; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t 2785; CHECK-NEXT: ret 2786entry: 2787 
tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 5) 2788 ret void 2789} 2790 2791 2792define void @test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 2793; CHECK-LABEL: test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t: 2794; CHECK: # %bb.0: # %entry 2795; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 2796; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 2797; CHECK-NEXT: ret 2798entry: 2799 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 2800 ret void 2801} 2802 2803define void @test_vssseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 2804; CHECK-LABEL: test_vssseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t: 2805; CHECK: # %bb.0: # %entry 2806; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 2807; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t 2808; CHECK-NEXT: ret 2809entry: 2810 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 5) 2811 ret void 2812} 2813 2814 2815define void @test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 2816; CHECK-LABEL: test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t: 2817; CHECK: # %bb.0: # %entry 2818; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma 2819; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 2820; CHECK-NEXT: ret 2821entry: 2822 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) 
%val, ptr %base, i32 %offset, i32 %vl, i32 5) 2823 ret void 2824} 2825 2826define void @test_vssseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 2827; CHECK-LABEL: test_vssseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t: 2828; CHECK: # %bb.0: # %entry 2829; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma 2830; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t 2831; CHECK-NEXT: ret 2832entry: 2833 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 5) 2834 ret void 2835} 2836 2837 2838define void @test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 2839; CHECK-LABEL: test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t: 2840; CHECK: # %bb.0: # %entry 2841; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 2842; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 2843; CHECK-NEXT: ret 2844entry: 2845 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 2846 ret void 2847} 2848 2849define void @test_vssseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2850; CHECK-LABEL: test_vssseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t: 2851; CHECK: # %bb.0: # %entry 2852; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 2853; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t 2854; CHECK-NEXT: ret 2855entry: 2856 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 5) 2857 ret void 2858} 2859 2860 
2861define void @test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 2862; CHECK-LABEL: test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t: 2863; CHECK: # %bb.0: # %entry 2864; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 2865; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 2866; CHECK-NEXT: ret 2867entry: 2868 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 2869 ret void 2870} 2871 2872define void @test_vssseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 2873; CHECK-LABEL: test_vssseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t: 2874; CHECK: # %bb.0: # %entry 2875; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 2876; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t 2877; CHECK-NEXT: ret 2878entry: 2879 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 5) 2880 ret void 2881} 2882 2883 2884define void @test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 2885; CHECK-LABEL: test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t: 2886; CHECK: # %bb.0: # %entry 2887; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma 2888; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 2889; CHECK-NEXT: ret 2890entry: 2891 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 2892 ret void 2893} 2894 2895define void @test_vssseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 
%vl, <vscale x 4 x i1> %mask) { 2896; CHECK-LABEL: test_vssseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t: 2897; CHECK: # %bb.0: # %entry 2898; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma 2899; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t 2900; CHECK-NEXT: ret 2901entry: 2902 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 5) 2903 ret void 2904} 2905 2906 2907define void @test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) { 2908; CHECK-LABEL: test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t: 2909; CHECK: # %bb.0: # %entry 2910; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 2911; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 2912; CHECK-NEXT: ret 2913entry: 2914 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 2915 ret void 2916} 2917 2918define void @test_vssseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2919; CHECK-LABEL: test_vssseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t: 2920; CHECK: # %bb.0: # %entry 2921; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 2922; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t 2923; CHECK-NEXT: ret 2924entry: 2925 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 5) 2926 ret void 2927} 2928 2929 2930define void @test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) { 2931; CHECK-LABEL: test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t: 2932; 
CHECK: # %bb.0: # %entry 2933; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 2934; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 2935; CHECK-NEXT: ret 2936entry: 2937 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 2938 ret void 2939} 2940 2941define void @test_vssseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 2942; CHECK-LABEL: test_vssseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t: 2943; CHECK: # %bb.0: # %entry 2944; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 2945; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t 2946; CHECK-NEXT: ret 2947entry: 2948 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 5) 2949 ret void 2950} 2951 2952 2953define void @test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) { 2954; CHECK-LABEL: test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t: 2955; CHECK: # %bb.0: # %entry 2956; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 2957; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 2958; CHECK-NEXT: ret 2959entry: 2960 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 2961 ret void 2962} 2963 2964define void @test_vssseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 2965; CHECK-LABEL: test_vssseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t: 2966; CHECK: # %bb.0: # %entry 2967; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 2968; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t 
2969; CHECK-NEXT: ret 2970entry: 2971 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 5) 2972 ret void 2973} 2974 2975 2976define void @test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) { 2977; CHECK-LABEL: test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t: 2978; CHECK: # %bb.0: # %entry 2979; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 2980; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 2981; CHECK-NEXT: ret 2982entry: 2983 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 2984 ret void 2985} 2986 2987define void @test_vssseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 2988; CHECK-LABEL: test_vssseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t: 2989; CHECK: # %bb.0: # %entry 2990; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 2991; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t 2992; CHECK-NEXT: ret 2993entry: 2994 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 5) 2995 ret void 2996} 2997 2998 2999define void @test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 3000; CHECK-LABEL: test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t: 3001; CHECK: # %bb.0: # %entry 3002; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 3003; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 3004; CHECK-NEXT: ret 3005entry: 3006 tail call void 
@llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 3007 ret void 3008} 3009 3010define void @test_vssseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 3011; CHECK-LABEL: test_vssseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t: 3012; CHECK: # %bb.0: # %entry 3013; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 3014; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t 3015; CHECK-NEXT: ret 3016entry: 3017 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 5) 3018 ret void 3019} 3020 3021 3022define void @test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 3023; CHECK-LABEL: test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t: 3024; CHECK: # %bb.0: # %entry 3025; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 3026; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 3027; CHECK-NEXT: ret 3028entry: 3029 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 3030 ret void 3031} 3032 3033define void @test_vssseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 3034; CHECK-LABEL: test_vssseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t: 3035; CHECK: # %bb.0: # %entry 3036; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 3037; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t 3038; CHECK-NEXT: ret 3039entry: 3040 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr 
%base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 5) 3041 ret void 3042} 3043 3044 3045define void @test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 3046; CHECK-LABEL: test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t: 3047; CHECK: # %bb.0: # %entry 3048; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 3049; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 3050; CHECK-NEXT: ret 3051entry: 3052 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 3053 ret void 3054} 3055 3056define void @test_vssseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 3057; CHECK-LABEL: test_vssseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t: 3058; CHECK: # %bb.0: # %entry 3059; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma 3060; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t 3061; CHECK-NEXT: ret 3062entry: 3063 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 5) 3064 ret void 3065} 3066 3067 3068define void @test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 3069; CHECK-LABEL: test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t: 3070; CHECK: # %bb.0: # %entry 3071; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 3072; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 3073; CHECK-NEXT: ret 3074entry: 3075 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 5) 3076 ret void 3077} 3078 3079define void 
@test_vssseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 3080; CHECK-LABEL: test_vssseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t: 3081; CHECK: # %bb.0: # %entry 3082; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma 3083; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t 3084; CHECK-NEXT: ret 3085entry: 3086 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 5) 3087 ret void 3088} 3089 3090 3091define void @test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 3092; CHECK-LABEL: test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t: 3093; CHECK: # %bb.0: # %entry 3094; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 3095; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 3096; CHECK-NEXT: ret 3097entry: 3098 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 3099 ret void 3100} 3101 3102define void @test_vssseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 3103; CHECK-LABEL: test_vssseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t: 3104; CHECK: # %bb.0: # %entry 3105; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 3106; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t 3107; CHECK-NEXT: ret 3108entry: 3109 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 6) 3110 ret void 3111} 3112 3113 3114define void @test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", 
<vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 3115; CHECK-LABEL: test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t: 3116; CHECK: # %bb.0: # %entry 3117; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma 3118; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 3119; CHECK-NEXT: ret 3120entry: 3121 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 3122 ret void 3123} 3124 3125define void @test_vssseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 3126; CHECK-LABEL: test_vssseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t: 3127; CHECK: # %bb.0: # %entry 3128; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma 3129; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t 3130; CHECK-NEXT: ret 3131entry: 3132 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 6) 3133 ret void 3134} 3135 3136 3137define void @test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 3138; CHECK-LABEL: test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t: 3139; CHECK: # %bb.0: # %entry 3140; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma 3141; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 3142; CHECK-NEXT: ret 3143entry: 3144 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 3145 ret void 3146} 3147 3148define void @test_vssseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) { 3149; CHECK-LABEL: 
test_vssseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t: 3150; CHECK: # %bb.0: # %entry 3151; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma 3152; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t 3153; CHECK-NEXT: ret 3154entry: 3155 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 6) 3156 ret void 3157} 3158 3159 3160define void @test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 3161; CHECK-LABEL: test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t: 3162; CHECK: # %bb.0: # %entry 3163; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 3164; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 3165; CHECK-NEXT: ret 3166entry: 3167 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 3168 ret void 3169} 3170 3171define void @test_vssseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 3172; CHECK-LABEL: test_vssseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t: 3173; CHECK: # %bb.0: # %entry 3174; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 3175; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t 3176; CHECK-NEXT: ret 3177entry: 3178 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 6) 3179 ret void 3180} 3181 3182 3183define void @test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) { 3184; CHECK-LABEL: test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t: 3185; CHECK: # %bb.0: # %entry 3186; CHECK-NEXT: vsetvli 
zero, a2, e64, m2, ta, ma 3187; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 3188; CHECK-NEXT: ret 3189entry: 3190 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 3191 ret void 3192} 3193 3194define void @test_vssseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 3195; CHECK-LABEL: test_vssseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t: 3196; CHECK: # %bb.0: # %entry 3197; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma 3198; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t 3199; CHECK-NEXT: ret 3200entry: 3201 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 6) 3202 ret void 3203} 3204 3205 3206define void @test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 3207; CHECK-LABEL: test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t: 3208; CHECK: # %bb.0: # %entry 3209; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 3210; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 3211; CHECK-NEXT: ret 3212entry: 3213 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 3214 ret void 3215} 3216 3217define void @test_vssseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 3218; CHECK-LABEL: test_vssseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t: 3219; CHECK: # %bb.0: # %entry 3220; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 3221; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t 3222; CHECK-NEXT: ret 3223entry: 3224 tail call 
void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 6) 3225 ret void 3226} 3227 3228 3229define void @test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) { 3230; CHECK-LABEL: test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t: 3231; CHECK: # %bb.0: # %entry 3232; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma 3233; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 3234; CHECK-NEXT: ret 3235entry: 3236 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 3237 ret void 3238} 3239 3240define void @test_vssseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) { 3241; CHECK-LABEL: test_vssseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t: 3242; CHECK: # %bb.0: # %entry 3243; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma 3244; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t 3245; CHECK-NEXT: ret 3246entry: 3247 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 6) 3248 ret void 3249} 3250 3251 3252define void @test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) { 3253; CHECK-LABEL: test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t: 3254; CHECK: # %bb.0: # %entry 3255; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 3256; CHECK-NEXT: vssseg5e64.v v8, (a0), a1 3257; CHECK-NEXT: ret 3258entry: 3259 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr 
%base, i32 %offset, i32 %vl, i32 6) 3260 ret void 3261} 3262 3263define void @test_vssseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 3264; CHECK-LABEL: test_vssseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t: 3265; CHECK: # %bb.0: # %entry 3266; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 3267; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t 3268; CHECK-NEXT: ret 3269entry: 3270 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 6) 3271 ret void 3272} 3273 3274 3275define void @test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) { 3276; CHECK-LABEL: test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t: 3277; CHECK: # %bb.0: # %entry 3278; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 3279; CHECK-NEXT: vssseg6e64.v v8, (a0), a1 3280; CHECK-NEXT: ret 3281entry: 3282 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 3283 ret void 3284} 3285 3286define void @test_vssseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 3287; CHECK-LABEL: test_vssseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t: 3288; CHECK: # %bb.0: # %entry 3289; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 3290; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t 3291; CHECK-NEXT: ret 3292entry: 3293 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 6) 3294 ret void 3295} 3296 3297 3298define void 
@test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) { 3299; CHECK-LABEL: test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t: 3300; CHECK: # %bb.0: # %entry 3301; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 3302; CHECK-NEXT: vssseg7e64.v v8, (a0), a1 3303; CHECK-NEXT: ret 3304entry: 3305 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 3306 ret void 3307} 3308 3309define void @test_vssseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 3310; CHECK-LABEL: test_vssseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t: 3311; CHECK: # %bb.0: # %entry 3312; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 3313; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t 3314; CHECK-NEXT: ret 3315entry: 3316 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 6) 3317 ret void 3318} 3319 3320 3321define void @test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) { 3322; CHECK-LABEL: test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t: 3323; CHECK: # %bb.0: # %entry 3324; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 3325; CHECK-NEXT: vssseg8e64.v v8, (a0), a1 3326; CHECK-NEXT: ret 3327entry: 3328 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 6) 3329 ret void 3330} 3331 3332define void @test_vssseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> 
%mask) { 3333; CHECK-LABEL: test_vssseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t: 3334; CHECK: # %bb.0: # %entry 3335; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma 3336; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t 3337; CHECK-NEXT: ret 3338entry: 3339 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 6) 3340 ret void 3341} 3342 3343 3344define void @test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 3345; CHECK-LABEL: test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: 3346; CHECK: # %bb.0: # %entry 3347; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 3348; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 3349; CHECK-NEXT: ret 3350entry: 3351 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4) 3352 ret void 3353} 3354 3355define void @test_vssseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) { 3356; CHECK-LABEL: test_vssseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t: 3357; CHECK: # %bb.0: # %entry 3358; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma 3359; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t 3360; CHECK-NEXT: ret 3361entry: 3362 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4) 3363 ret void 3364} 3365 3366 3367define void @test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) { 3368; CHECK-LABEL: test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t: 3369; CHECK: # %bb.0: # 
%entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

; NOTE(review): The tests below cover bf16-element strided segment stores
; (vssseg<N>e16) for N = 2..8. The tuple operand is byte-typed
; (<vscale x K*2 x i8> per field for nxvK x bf16) and the trailing i32 operand
; is 4 in every call, which pairs with "e16" in the generated CHECK lines —
; presumably log2(SEW); confirm against the RISC-V vector intrinsics spec.
; CHECK lines are autogenerated by update_llc_test_checks.py: do not hand-edit
; them — regenerate instead.
define void @test_vssseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl, i32 4)
  ret void
}


; Three-field segment stores (vssseg3e16), bf16 elements, nxv1 through nxv8.
define void @test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vssseg3e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vssseg3e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vssseg3e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vssseg3e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vssseg3e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vssseg3e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vssseg3e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vssseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vssseg3e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
  ret void
}


; Four-field segment stores (vssseg4e16), bf16 elements, nxv1 through nxv8.
define void @test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vssseg4e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vssseg4e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vssseg4e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vssseg4e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vssseg4e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vssseg4e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vssseg4e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vssseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vssseg4e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
  ret void
}


; Five-field segment stores (vssseg5e16). Coverage stops at nxv4bf16 (m1):
; 5 fields x m1 already uses 5 vector registers, so larger LMULs are illegal.
define void @test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vssseg5e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vssseg5e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vssseg5e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vssseg5e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vssseg5e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vssseg5e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
  ret void
}


; Six-field segment stores (vssseg6e16).
define void @test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vssseg6e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vssseg6e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vssseg6e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vssseg6e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vssseg6e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vssseg6e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
  ret void
}


; Seven-field segment stores (vssseg7e16).
define void @test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vssseg7e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vssseg7e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vssseg7e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vssseg7e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vssseg7e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vssseg7e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
  ret void
}


; Eight-field segment stores (vssseg8e16).
define void @test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vssseg8e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vssseg8e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vssseg8e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vssseg8e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
  ret void
}


define void @test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vssseg8e16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, i32 4)
  ret void
}

define void @test_vssseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vssseg8e16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
  ret void
}
