; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh,+zvfbfmin \
; RUN:     -verify-machineinstrs < %s | FileCheck %s

declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, i64, i64)
declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i1>, i64, i64)

define void @test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i1> splat (i1 true), i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, i64, i64)
declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i1>, i64, i64)

define void @test_vsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, i64, i64)
declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i1>, i64, i64)

define void @test_vsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i64, i64)
declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i1>, i64, i64)

define void @test_vsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, i64, i64)
declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 16 x i1>, i64, i64)

define void @test_vsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, i64, i64)
declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 32 x i1>, i64, i64)

define void @test_vsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, i64, i64)
declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i1>, i64, i64)

define void @test_vsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, i64, i64)
declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 2 x i1>, i64, i64)

define void @test_vsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, i64, i64)
declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i1>, i64, i64)

define void @test_vsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, i64, i64)
declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i1>, i64, i64)

define void @test_vsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, i64, i64)
declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 16 x i1>, i64, i64)

define void @test_vsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg3_allonesmask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsseg3_allonesmask_nxv16i8_triscv.vector.tuple_nxv16i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i1> splat (i1 true), i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, i64, i64)
declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, <vscale x 1 x i1>, i64, i64)

define void @test_vsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i1> splat (i1 true), i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, i64, i64)
declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 2 x i1>, i64, i64)

define void @test_vsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, i64, i64)
declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i1>, i64, i64)

define void @test_vsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, i64, i64)
declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 8 x i1>, i64, i64)

define void @test_vsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, i64, i64)
declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 16 x i1>, i64, i64)

define void @test_vsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, i64, i64)
declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i1>, i64, i64)

define void @test_vsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i1> splat (i1 true), i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, i64, i64)
declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 2 x i1>, i64, i64)

define void @test_vsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, i64, i64)
declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i1>, i64, i64)

define void @test_vsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, i64, i64)
declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i1>, i64, i64)

define void @test_vsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, i64, i64)
declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, <vscale x 1 x i1>, i64, i64)

define void @test_vsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg6e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg6e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg6e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i1> splat (i1 true), i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, i64, i64)
declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 2 x i1>, i64, i64)

define void @test_vsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsseg6e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsseg6e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, i64, i64)
declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i1>, i64, i64)

define void @test_vsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg6e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg6e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, i64, i64)
declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 8 x i1>, i64, i64)

define void @test_vsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg6e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg6e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, i64, i64)
declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, <vscale x 1 x i1>, i64, i64)

define void @test_vsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg7e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg7e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg7e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i1> splat (i1 true), i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, i64, i64)
declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i1>, i64, i64)

define void @test_vsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsseg7e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsseg7e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, i64, i64)
declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i1>, i64, i64)

define void @test_vsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg7e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg7e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, i64, i64)
declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 8 x i1>, i64, i64)

define void @test_vsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg7e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg7e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, i64, i64)
declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i1>, i64, i64)

define void @test_vsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg8e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg8e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg8e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i1> splat (i1 true), i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, i64, i64)
declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i1>, i64, i64)

define void @test_vsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsseg8e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsseg8e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, i64, i64)
declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i1>, i64, i64)

define void @test_vsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg8e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg8e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, i64, i64)
declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 8 x i1>, i64, i64)

define void @test_vsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg8e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, i64 3)
  ret void
}

define void @test_vsseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg8e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
  ret void
}

declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 1 x i1>, i64, i64)

define void @test_vsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}

declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i1>, i64, i64)

define void @test_vsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}

declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i1>, i64, i64)

define void @test_vsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}

declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 8 x i1>, i64, i64)

define void @test_vsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
  ret void
}

declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 16 x i1>, i64, i64)

define void @test_vsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 4)
  ret void
1000}
1001
1002declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i1>, i64, i64)
1003
1004define void @test_vsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl) {
1005; CHECK-LABEL: test_vsseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t:
1006; CHECK:       # %bb.0: # %entry
1007; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1008; CHECK-NEXT:    vsseg3e16.v v8, (a0)
1009; CHECK-NEXT:    ret
1010entry:
1011  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, i64 4)
1012  ret void
1013}
1014
1015define void @test_vsseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
1016; CHECK-LABEL: test_vsseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t:
1017; CHECK:       # %bb.0: # %entry
1018; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1019; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
1020; CHECK-NEXT:    ret
1021entry:
1022  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
1023  ret void
1024}
1025
1026declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i1>, i64, i64)
1027
1028define void @test_vsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl) {
1029; CHECK-LABEL: test_vsseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t:
1030; CHECK:       # %bb.0: # %entry
1031; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1032; CHECK-NEXT:    vsseg3e16.v v8, (a0)
1033; CHECK-NEXT:    ret
1034entry:
1035  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, i64 4)
1036  ret void
1037}
1038
1039define void @test_vsseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
1040; CHECK-LABEL: test_vsseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t:
1041; CHECK:       # %bb.0: # %entry
1042; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1043; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
1044; CHECK-NEXT:    ret
1045entry:
1046  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
1047  ret void
1048}
1049
1050declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i1>, i64, i64)
1051
1052define void @test_vsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl) {
1053; CHECK-LABEL: test_vsseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t:
1054; CHECK:       # %bb.0: # %entry
1055; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1056; CHECK-NEXT:    vsseg3e16.v v8, (a0)
1057; CHECK-NEXT:    ret
1058entry:
1059  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, i64 4)
1060  ret void
1061}
1062
1063define void @test_vsseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
1064; CHECK-LABEL: test_vsseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t:
1065; CHECK:       # %bb.0: # %entry
1066; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1067; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
1068; CHECK-NEXT:    ret
1069entry:
1070  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
1071  ret void
1072}
1073
1074declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i1>, i64, i64)
1075
1076define void @test_vsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl) {
1077; CHECK-LABEL: test_vsseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t:
1078; CHECK:       # %bb.0: # %entry
1079; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
1080; CHECK-NEXT:    vsseg3e16.v v8, (a0)
1081; CHECK-NEXT:    ret
1082entry:
1083  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, i64 4)
1084  ret void
1085}
1086
1087define void @test_vsseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
1088; CHECK-LABEL: test_vsseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t:
1089; CHECK:       # %bb.0: # %entry
1090; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
1091; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
1092; CHECK-NEXT:    ret
1093entry:
1094  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
1095  ret void
1096}
1097
1098declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i1>, i64, i64)
1099
1100define void @test_vsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl) {
1101; CHECK-LABEL: test_vsseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t:
1102; CHECK:       # %bb.0: # %entry
1103; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1104; CHECK-NEXT:    vsseg4e16.v v8, (a0)
1105; CHECK-NEXT:    ret
1106entry:
1107  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, i64 4)
1108  ret void
1109}
1110
1111define void @test_vsseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
1112; CHECK-LABEL: test_vsseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t:
1113; CHECK:       # %bb.0: # %entry
1114; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1115; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
1116; CHECK-NEXT:    ret
1117entry:
1118  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
1119  ret void
1120}
1121
1122declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 2 x i1>, i64, i64)
1123
1124define void @test_vsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl) {
1125; CHECK-LABEL: test_vsseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t:
1126; CHECK:       # %bb.0: # %entry
1127; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1128; CHECK-NEXT:    vsseg4e16.v v8, (a0)
1129; CHECK-NEXT:    ret
1130entry:
1131  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, i64 4)
1132  ret void
1133}
1134
1135define void @test_vsseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
1136; CHECK-LABEL: test_vsseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t:
1137; CHECK:       # %bb.0: # %entry
1138; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1139; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
1140; CHECK-NEXT:    ret
1141entry:
1142  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
1143  ret void
1144}
1145
1146declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i1>, i64, i64)
1147
1148define void @test_vsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl) {
1149; CHECK-LABEL: test_vsseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t:
1150; CHECK:       # %bb.0: # %entry
1151; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1152; CHECK-NEXT:    vsseg4e16.v v8, (a0)
1153; CHECK-NEXT:    ret
1154entry:
1155  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, i64 4)
1156  ret void
1157}
1158
1159define void @test_vsseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
1160; CHECK-LABEL: test_vsseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t:
1161; CHECK:       # %bb.0: # %entry
1162; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1163; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
1164; CHECK-NEXT:    ret
1165entry:
1166  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
1167  ret void
1168}
1169
1170declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i1>, i64, i64)
1171
1172define void @test_vsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl) {
1173; CHECK-LABEL: test_vsseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t:
1174; CHECK:       # %bb.0: # %entry
1175; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
1176; CHECK-NEXT:    vsseg4e16.v v8, (a0)
1177; CHECK-NEXT:    ret
1178entry:
1179  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, i64 4)
1180  ret void
1181}
1182
1183define void @test_vsseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
1184; CHECK-LABEL: test_vsseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t:
1185; CHECK:       # %bb.0: # %entry
1186; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
1187; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
1188; CHECK-NEXT:    ret
1189entry:
1190  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
1191  ret void
1192}
1193
1194declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i1>, i64, i64)
1195
1196define void @test_vsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl) {
1197; CHECK-LABEL: test_vsseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t:
1198; CHECK:       # %bb.0: # %entry
1199; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1200; CHECK-NEXT:    vsseg5e16.v v8, (a0)
1201; CHECK-NEXT:    ret
1202entry:
1203  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, i64 4)
1204  ret void
1205}
1206
1207define void @test_vsseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
1208; CHECK-LABEL: test_vsseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t:
1209; CHECK:       # %bb.0: # %entry
1210; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1211; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
1212; CHECK-NEXT:    ret
1213entry:
1214  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
1215  ret void
1216}
1217
1218declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i1>, i64, i64)
1219
1220define void @test_vsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl) {
1221; CHECK-LABEL: test_vsseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t:
1222; CHECK:       # %bb.0: # %entry
1223; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1224; CHECK-NEXT:    vsseg5e16.v v8, (a0)
1225; CHECK-NEXT:    ret
1226entry:
1227  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, i64 4)
1228  ret void
1229}
1230
1231define void @test_vsseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
1232; CHECK-LABEL: test_vsseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t:
1233; CHECK:       # %bb.0: # %entry
1234; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1235; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
1236; CHECK-NEXT:    ret
1237entry:
1238  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
1239  ret void
1240}
1241
1242declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i1>, i64, i64)
1243
1244define void @test_vsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl) {
1245; CHECK-LABEL: test_vsseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t:
1246; CHECK:       # %bb.0: # %entry
1247; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1248; CHECK-NEXT:    vsseg5e16.v v8, (a0)
1249; CHECK-NEXT:    ret
1250entry:
1251  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, i64 4)
1252  ret void
1253}
1254
1255define void @test_vsseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
1256; CHECK-LABEL: test_vsseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t:
1257; CHECK:       # %bb.0: # %entry
1258; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1259; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
1260; CHECK-NEXT:    ret
1261entry:
1262  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
1263  ret void
1264}
1265
1266declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i1>, i64, i64)
1267
1268define void @test_vsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl) {
1269; CHECK-LABEL: test_vsseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t:
1270; CHECK:       # %bb.0: # %entry
1271; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1272; CHECK-NEXT:    vsseg6e16.v v8, (a0)
1273; CHECK-NEXT:    ret
1274entry:
1275  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, i64 4)
1276  ret void
1277}
1278
1279define void @test_vsseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
1280; CHECK-LABEL: test_vsseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t:
1281; CHECK:       # %bb.0: # %entry
1282; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1283; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
1284; CHECK-NEXT:    ret
1285entry:
1286  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
1287  ret void
1288}
1289
1290declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i1>, i64, i64)
1291
1292define void @test_vsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl) {
1293; CHECK-LABEL: test_vsseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t:
1294; CHECK:       # %bb.0: # %entry
1295; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1296; CHECK-NEXT:    vsseg6e16.v v8, (a0)
1297; CHECK-NEXT:    ret
1298entry:
1299  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, i64 4)
1300  ret void
1301}
1302
1303define void @test_vsseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
1304; CHECK-LABEL: test_vsseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t:
1305; CHECK:       # %bb.0: # %entry
1306; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1307; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
1308; CHECK-NEXT:    ret
1309entry:
1310  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
1311  ret void
1312}
1313
1314declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 4 x i1>, i64, i64)
1315
1316define void @test_vsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl) {
1317; CHECK-LABEL: test_vsseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t:
1318; CHECK:       # %bb.0: # %entry
1319; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1320; CHECK-NEXT:    vsseg6e16.v v8, (a0)
1321; CHECK-NEXT:    ret
1322entry:
1323  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, i64 4)
1324  ret void
1325}
1326
1327define void @test_vsseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
1328; CHECK-LABEL: test_vsseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t:
1329; CHECK:       # %bb.0: # %entry
1330; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1331; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
1332; CHECK-NEXT:    ret
1333entry:
1334  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
1335  ret void
1336}
1337
1338declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 1 x i1>, i64, i64)
1339
1340define void @test_vsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl) {
1341; CHECK-LABEL: test_vsseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t:
1342; CHECK:       # %bb.0: # %entry
1343; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1344; CHECK-NEXT:    vsseg7e16.v v8, (a0)
1345; CHECK-NEXT:    ret
1346entry:
1347  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, i64 4)
1348  ret void
1349}
1350
1351define void @test_vsseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
1352; CHECK-LABEL: test_vsseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t:
1353; CHECK:       # %bb.0: # %entry
1354; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1355; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
1356; CHECK-NEXT:    ret
1357entry:
1358  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
1359  ret void
1360}
1361
1362declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 2 x i1>, i64, i64)
1363
1364define void @test_vsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl) {
1365; CHECK-LABEL: test_vsseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t:
1366; CHECK:       # %bb.0: # %entry
1367; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1368; CHECK-NEXT:    vsseg7e16.v v8, (a0)
1369; CHECK-NEXT:    ret
1370entry:
1371  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, i64 4)
1372  ret void
1373}
1374
1375define void @test_vsseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
1376; CHECK-LABEL: test_vsseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t:
1377; CHECK:       # %bb.0: # %entry
1378; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1379; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
1380; CHECK-NEXT:    ret
1381entry:
1382  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
1383  ret void
1384}
1385
1386declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 4 x i1>, i64, i64)
1387
1388define void @test_vsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl) {
1389; CHECK-LABEL: test_vsseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t:
1390; CHECK:       # %bb.0: # %entry
1391; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1392; CHECK-NEXT:    vsseg7e16.v v8, (a0)
1393; CHECK-NEXT:    ret
1394entry:
1395  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, i64 4)
1396  ret void
1397}
1398
1399define void @test_vsseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
1400; CHECK-LABEL: test_vsseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t:
1401; CHECK:       # %bb.0: # %entry
1402; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1403; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
1404; CHECK-NEXT:    ret
1405entry:
1406  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
1407  ret void
1408}
1409
1410declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 1 x i1>, i64, i64)
1411
1412define void @test_vsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl) {
1413; CHECK-LABEL: test_vsseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t:
1414; CHECK:       # %bb.0: # %entry
1415; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1416; CHECK-NEXT:    vsseg8e16.v v8, (a0)
1417; CHECK-NEXT:    ret
1418entry:
1419  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, i64 4)
1420  ret void
1421}
1422
1423define void @test_vsseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
1424; CHECK-LABEL: test_vsseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t:
1425; CHECK:       # %bb.0: # %entry
1426; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1427; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
1428; CHECK-NEXT:    ret
1429entry:
1430  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
1431  ret void
1432}
1433
1434declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 2 x i1>, i64, i64)
1435
1436define void @test_vsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl) {
1437; CHECK-LABEL: test_vsseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t:
1438; CHECK:       # %bb.0: # %entry
1439; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1440; CHECK-NEXT:    vsseg8e16.v v8, (a0)
1441; CHECK-NEXT:    ret
1442entry:
1443  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, i64 4)
1444  ret void
1445}
1446
1447define void @test_vsseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
1448; CHECK-LABEL: test_vsseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t:
1449; CHECK:       # %bb.0: # %entry
1450; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1451; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
1452; CHECK-NEXT:    ret
1453entry:
1454  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
1455  ret void
1456}
1457
1458declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 4 x i1>, i64, i64)
1459
1460define void @test_vsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl) {
1461; CHECK-LABEL: test_vsseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t:
1462; CHECK:       # %bb.0: # %entry
1463; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1464; CHECK-NEXT:    vsseg8e16.v v8, (a0)
1465; CHECK-NEXT:    ret
1466entry:
1467  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, i64 4)
1468  ret void
1469}
1470
1471define void @test_vsseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
1472; CHECK-LABEL: test_vsseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t:
1473; CHECK:       # %bb.0: # %entry
1474; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1475; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
1476; CHECK-NEXT:    ret
1477entry:
1478  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
1479  ret void
1480}
1481
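; The tests below cover SEW=32 element tuples: the trailing i64 operand of each
; intrinsic call carries log2(SEW), so it is 5 here (it was 4 for the e16 tests
; above), and the autogenerated CHECK lines accordingly select e32 vsetvli and
; vsseg<N>e32.v forms.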
1482declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 1 x i1>, i64, i64)
1483
1484define void @test_vsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl) {
1485; CHECK-LABEL: test_vsseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t:
1486; CHECK:       # %bb.0: # %entry
1487; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1488; CHECK-NEXT:    vsseg2e32.v v8, (a0)
1489; CHECK-NEXT:    ret
1490entry:
1491  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, i64 5)
1492  ret void
1493}
1494
1495define void @test_vsseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
1496; CHECK-LABEL: test_vsseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t:
1497; CHECK:       # %bb.0: # %entry
1498; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1499; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
1500; CHECK-NEXT:    ret
1501entry:
1502  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
1503  ret void
1504}
1505
1506declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i1>, i64, i64)
1507
1508define void @test_vsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl) {
1509; CHECK-LABEL: test_vsseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t:
1510; CHECK:       # %bb.0: # %entry
1511; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1512; CHECK-NEXT:    vsseg2e32.v v8, (a0)
1513; CHECK-NEXT:    ret
1514entry:
1515  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, i64 5)
1516  ret void
1517}
1518
1519define void @test_vsseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
1520; CHECK-LABEL: test_vsseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t:
1521; CHECK:       # %bb.0: # %entry
1522; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1523; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
1524; CHECK-NEXT:    ret
1525entry:
1526  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
1527  ret void
1528}
1529
1530declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i1>, i64, i64)
1531
1532define void @test_vsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl) {
1533; CHECK-LABEL: test_vsseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t:
1534; CHECK:       # %bb.0: # %entry
1535; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
1536; CHECK-NEXT:    vsseg2e32.v v8, (a0)
1537; CHECK-NEXT:    ret
1538entry:
1539  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, i64 5)
1540  ret void
1541}
1542
1543define void @test_vsseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
1544; CHECK-LABEL: test_vsseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t:
1545; CHECK:       # %bb.0: # %entry
1546; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
1547; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
1548; CHECK-NEXT:    ret
1549entry:
1550  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 5)
1551  ret void
1552}
1553
1554declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 8 x i1>, i64, i64)
1555
1556define void @test_vsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl) {
1557; CHECK-LABEL: test_vsseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t:
1558; CHECK:       # %bb.0: # %entry
1559; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
1560; CHECK-NEXT:    vsseg2e32.v v8, (a0)
1561; CHECK-NEXT:    ret
1562entry:
1563  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, i64 5)
1564  ret void
1565}
1566
1567define void @test_vsseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
1568; CHECK-LABEL: test_vsseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t:
1569; CHECK:       # %bb.0: # %entry
1570; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
1571; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
1572; CHECK-NEXT:    ret
1573entry:
1574  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 5)
1575  ret void
1576}
1577
1578declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 1 x i1>, i64, i64)
1579
1580define void @test_vsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl) {
1581; CHECK-LABEL: test_vsseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t:
1582; CHECK:       # %bb.0: # %entry
1583; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1584; CHECK-NEXT:    vsseg3e32.v v8, (a0)
1585; CHECK-NEXT:    ret
1586entry:
1587  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, i64 5)
1588  ret void
1589}
1590
1591define void @test_vsseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
1592; CHECK-LABEL: test_vsseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t:
1593; CHECK:       # %bb.0: # %entry
1594; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1595; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
1596; CHECK-NEXT:    ret
1597entry:
1598  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
1599  ret void
1600}
1601
1602declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 2 x i1>, i64, i64)
1603
1604define void @test_vsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl) {
1605; CHECK-LABEL: test_vsseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t:
1606; CHECK:       # %bb.0: # %entry
1607; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1608; CHECK-NEXT:    vsseg3e32.v v8, (a0)
1609; CHECK-NEXT:    ret
1610entry:
1611  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, i64 5)
1612  ret void
1613}
1614
1615define void @test_vsseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
1616; CHECK-LABEL: test_vsseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t:
1617; CHECK:       # %bb.0: # %entry
1618; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1619; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
1620; CHECK-NEXT:    ret
1621entry:
1622  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
1623  ret void
1624}
1625
1626declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 4 x i1>, i64, i64)
1627
1628define void @test_vsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl) {
1629; CHECK-LABEL: test_vsseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t:
1630; CHECK:       # %bb.0: # %entry
1631; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
1632; CHECK-NEXT:    vsseg3e32.v v8, (a0)
1633; CHECK-NEXT:    ret
1634entry:
1635  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, i64 5)
1636  ret void
1637}
1638
1639define void @test_vsseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
1640; CHECK-LABEL: test_vsseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t:
1641; CHECK:       # %bb.0: # %entry
1642; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
1643; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
1644; CHECK-NEXT:    ret
1645entry:
1646  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 5)
1647  ret void
1648}
1649
1650declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 1 x i1>, i64, i64)
1651
1652define void @test_vsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl) {
1653; CHECK-LABEL: test_vsseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t:
1654; CHECK:       # %bb.0: # %entry
1655; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1656; CHECK-NEXT:    vsseg4e32.v v8, (a0)
1657; CHECK-NEXT:    ret
1658entry:
1659  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, i64 5)
1660  ret void
1661}
1662
1663define void @test_vsseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
1664; CHECK-LABEL: test_vsseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t:
1665; CHECK:       # %bb.0: # %entry
1666; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1667; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
1668; CHECK-NEXT:    ret
1669entry:
1670  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
1671  ret void
1672}
1673
1674declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 2 x i1>, i64, i64)
1675
1676define void @test_vsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl) {
1677; CHECK-LABEL: test_vsseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t:
1678; CHECK:       # %bb.0: # %entry
1679; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1680; CHECK-NEXT:    vsseg4e32.v v8, (a0)
1681; CHECK-NEXT:    ret
1682entry:
1683  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, i64 5)
1684  ret void
1685}
1686
1687define void @test_vsseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
1688; CHECK-LABEL: test_vsseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t:
1689; CHECK:       # %bb.0: # %entry
1690; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1691; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
1692; CHECK-NEXT:    ret
1693entry:
1694  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
1695  ret void
1696}
1697
1698declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 4 x i1>, i64, i64)
1699
1700define void @test_vsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl) {
1701; CHECK-LABEL: test_vsseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t:
1702; CHECK:       # %bb.0: # %entry
1703; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
1704; CHECK-NEXT:    vsseg4e32.v v8, (a0)
1705; CHECK-NEXT:    ret
1706entry:
1707  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, i64 5)
1708  ret void
1709}
1710
1711define void @test_vsseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
1712; CHECK-LABEL: test_vsseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t:
1713; CHECK:       # %bb.0: # %entry
1714; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
1715; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
1716; CHECK-NEXT:    ret
1717entry:
1718  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 5)
1719  ret void
1720}
1721
1722declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i1>, i64, i64)
1723
1724define void @test_vsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl) {
1725; CHECK-LABEL: test_vsseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t:
1726; CHECK:       # %bb.0: # %entry
1727; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1728; CHECK-NEXT:    vsseg5e32.v v8, (a0)
1729; CHECK-NEXT:    ret
1730entry:
1731  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, i64 5)
1732  ret void
1733}
1734
1735define void @test_vsseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
1736; CHECK-LABEL: test_vsseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t:
1737; CHECK:       # %bb.0: # %entry
1738; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1739; CHECK-NEXT:    vsseg5e32.v v8, (a0), v0.t
1740; CHECK-NEXT:    ret
1741entry:
1742  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
1743  ret void
1744}
1745
1746declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 2 x i1>, i64, i64)
1747
1748define void @test_vsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl) {
1749; CHECK-LABEL: test_vsseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t:
1750; CHECK:       # %bb.0: # %entry
1751; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1752; CHECK-NEXT:    vsseg5e32.v v8, (a0)
1753; CHECK-NEXT:    ret
1754entry:
1755  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, i64 5)
1756  ret void
1757}
1758
1759define void @test_vsseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
1760; CHECK-LABEL: test_vsseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t:
1761; CHECK:       # %bb.0: # %entry
1762; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1763; CHECK-NEXT:    vsseg5e32.v v8, (a0), v0.t
1764; CHECK-NEXT:    ret
1765entry:
1766  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
1767  ret void
1768}
1769
1770declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 1 x i1>, i64, i64)
1771
1772define void @test_vsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl) {
1773; CHECK-LABEL: test_vsseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t:
1774; CHECK:       # %bb.0: # %entry
1775; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1776; CHECK-NEXT:    vsseg6e32.v v8, (a0)
1777; CHECK-NEXT:    ret
1778entry:
1779  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, i64 5)
1780  ret void
1781}
1782
1783define void @test_vsseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
1784; CHECK-LABEL: test_vsseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t:
1785; CHECK:       # %bb.0: # %entry
1786; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1787; CHECK-NEXT:    vsseg6e32.v v8, (a0), v0.t
1788; CHECK-NEXT:    ret
1789entry:
1790  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
1791  ret void
1792}
1793
1794declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 2 x i1>, i64, i64)
1795
1796define void @test_vsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl) {
1797; CHECK-LABEL: test_vsseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t:
1798; CHECK:       # %bb.0: # %entry
1799; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1800; CHECK-NEXT:    vsseg6e32.v v8, (a0)
1801; CHECK-NEXT:    ret
1802entry:
1803  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, i64 5)
1804  ret void
1805}
1806
1807define void @test_vsseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
1808; CHECK-LABEL: test_vsseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t:
1809; CHECK:       # %bb.0: # %entry
1810; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1811; CHECK-NEXT:    vsseg6e32.v v8, (a0), v0.t
1812; CHECK-NEXT:    ret
1813entry:
1814  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
1815  ret void
1816}
1817
1818declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i1>, i64, i64)
1819
1820define void @test_vsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl) {
1821; CHECK-LABEL: test_vsseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t:
1822; CHECK:       # %bb.0: # %entry
1823; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1824; CHECK-NEXT:    vsseg7e32.v v8, (a0)
1825; CHECK-NEXT:    ret
1826entry:
1827  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, i64 5)
1828  ret void
1829}
1830
1831define void @test_vsseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
1832; CHECK-LABEL: test_vsseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t:
1833; CHECK:       # %bb.0: # %entry
1834; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1835; CHECK-NEXT:    vsseg7e32.v v8, (a0), v0.t
1836; CHECK-NEXT:    ret
1837entry:
1838  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
1839  ret void
1840}
1841
1842declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 2 x i1>, i64, i64)
1843
1844define void @test_vsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl) {
1845; CHECK-LABEL: test_vsseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t:
1846; CHECK:       # %bb.0: # %entry
1847; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1848; CHECK-NEXT:    vsseg7e32.v v8, (a0)
1849; CHECK-NEXT:    ret
1850entry:
1851  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, i64 5)
1852  ret void
1853}
1854
1855define void @test_vsseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
1856; CHECK-LABEL: test_vsseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t:
1857; CHECK:       # %bb.0: # %entry
1858; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1859; CHECK-NEXT:    vsseg7e32.v v8, (a0), v0.t
1860; CHECK-NEXT:    ret
1861entry:
1862  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
1863  ret void
1864}
1865
1866declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i1>, i64, i64)
1867
1868define void @test_vsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl) {
1869; CHECK-LABEL: test_vsseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t:
1870; CHECK:       # %bb.0: # %entry
1871; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1872; CHECK-NEXT:    vsseg8e32.v v8, (a0)
1873; CHECK-NEXT:    ret
1874entry:
1875  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, i64 5)
1876  ret void
1877}
1878
1879define void @test_vsseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
1880; CHECK-LABEL: test_vsseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t:
1881; CHECK:       # %bb.0: # %entry
1882; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1883; CHECK-NEXT:    vsseg8e32.v v8, (a0), v0.t
1884; CHECK-NEXT:    ret
1885entry:
1886  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
1887  ret void
1888}
1889
1890declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i1>, i64, i64)
1891
1892define void @test_vsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl) {
1893; CHECK-LABEL: test_vsseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t:
1894; CHECK:       # %bb.0: # %entry
1895; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1896; CHECK-NEXT:    vsseg8e32.v v8, (a0)
1897; CHECK-NEXT:    ret
1898entry:
1899  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, i64 5)
1900  ret void
1901}
1902
1903define void @test_vsseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
1904; CHECK-LABEL: test_vsseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t:
1905; CHECK:       # %bb.0: # %entry
1906; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1907; CHECK-NEXT:    vsseg8e32.v v8, (a0), v0.t
1908; CHECK-NEXT:    ret
1909entry:
1910  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
1911  ret void
1912}
1913
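; The remaining tests cover SEW=64 element tuples: the trailing log2(SEW)
; operand is 6, and the CHECK lines select e64 vsetvli and vsseg<N>e64.v forms.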
1914declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i1>, i64, i64)
1915
1916define void @test_vsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl) {
1917; CHECK-LABEL: test_vsseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t:
1918; CHECK:       # %bb.0: # %entry
1919; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1920; CHECK-NEXT:    vsseg2e64.v v8, (a0)
1921; CHECK-NEXT:    ret
1922entry:
1923  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, i64 6)
1924  ret void
1925}
1926
1927define void @test_vsseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
1928; CHECK-LABEL: test_vsseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t:
1929; CHECK:       # %bb.0: # %entry
1930; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1931; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
1932; CHECK-NEXT:    ret
1933entry:
1934  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
1935  ret void
1936}
1937
1938declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i1>, i64, i64)
1939
1940define void @test_vsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl) {
1941; CHECK-LABEL: test_vsseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t:
1942; CHECK:       # %bb.0: # %entry
1943; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
1944; CHECK-NEXT:    vsseg2e64.v v8, (a0)
1945; CHECK-NEXT:    ret
1946entry:
1947  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, i64 6)
1948  ret void
1949}
1950
1951define void @test_vsseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
1952; CHECK-LABEL: test_vsseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t:
1953; CHECK:       # %bb.0: # %entry
1954; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
1955; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
1956; CHECK-NEXT:    ret
1957entry:
1958  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
1959  ret void
1960}
1961
1962declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i1>, i64, i64)
1963
1964define void @test_vsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl) {
1965; CHECK-LABEL: test_vsseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t:
1966; CHECK:       # %bb.0: # %entry
1967; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
1968; CHECK-NEXT:    vsseg2e64.v v8, (a0)
1969; CHECK-NEXT:    ret
1970entry:
1971  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, i64 6)
1972  ret void
1973}
1974
1975define void @test_vsseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
1976; CHECK-LABEL: test_vsseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t:
1977; CHECK:       # %bb.0: # %entry
1978; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
1979; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
1980; CHECK-NEXT:    ret
1981entry:
1982  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 6)
1983  ret void
1984}
1985
1986declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 1 x i1>, i64, i64)
1987
1988define void @test_vsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl) {
1989; CHECK-LABEL: test_vsseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t:
1990; CHECK:       # %bb.0: # %entry
1991; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1992; CHECK-NEXT:    vsseg3e64.v v8, (a0)
1993; CHECK-NEXT:    ret
1994entry:
1995  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, i64 6)
1996  ret void
1997}
1998
1999define void @test_vsseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2000; CHECK-LABEL: test_vsseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t:
2001; CHECK:       # %bb.0: # %entry
2002; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
2003; CHECK-NEXT:    vsseg3e64.v v8, (a0), v0.t
2004; CHECK-NEXT:    ret
2005entry:
2006  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
2007  ret void
2008}
2009
2010declare void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i1>, i64, i64)
2011
2012define void @test_vsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl) {
2013; CHECK-LABEL: test_vsseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t:
2014; CHECK:       # %bb.0: # %entry
2015; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
2016; CHECK-NEXT:    vsseg3e64.v v8, (a0)
2017; CHECK-NEXT:    ret
2018entry:
2019  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, i64 6)
2020  ret void
2021}
2022
2023define void @test_vsseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
2024; CHECK-LABEL: test_vsseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t:
2025; CHECK:       # %bb.0: # %entry
2026; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
2027; CHECK-NEXT:    vsseg3e64.v v8, (a0), v0.t
2028; CHECK-NEXT:    ret
2029entry:
2030  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
2031  ret void
2032}
2033
2034declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i1>, i64, i64)
2035
2036define void @test_vsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl) {
2037; CHECK-LABEL: test_vsseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t:
2038; CHECK:       # %bb.0: # %entry
2039; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
2040; CHECK-NEXT:    vsseg4e64.v v8, (a0)
2041; CHECK-NEXT:    ret
2042entry:
2043  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, i64 6)
2044  ret void
2045}
2046
2047define void @test_vsseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2048; CHECK-LABEL: test_vsseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t:
2049; CHECK:       # %bb.0: # %entry
2050; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
2051; CHECK-NEXT:    vsseg4e64.v v8, (a0), v0.t
2052; CHECK-NEXT:    ret
2053entry:
2054  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
2055  ret void
2056}
2057
2058declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 2 x i1>, i64, i64)
2059
2060define void @test_vsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl) {
2061; CHECK-LABEL: test_vsseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t:
2062; CHECK:       # %bb.0: # %entry
2063; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
2064; CHECK-NEXT:    vsseg4e64.v v8, (a0)
2065; CHECK-NEXT:    ret
2066entry:
2067  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, i64 6)
2068  ret void
2069}
2070
2071define void @test_vsseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
2072; CHECK-LABEL: test_vsseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t:
2073; CHECK:       # %bb.0: # %entry
2074; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
2075; CHECK-NEXT:    vsseg4e64.v v8, (a0), v0.t
2076; CHECK-NEXT:    ret
2077entry:
2078  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
2079  ret void
2080}
2081
2082declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 1 x i1>, i64, i64)
2083
2084define void @test_vsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl) {
2085; CHECK-LABEL: test_vsseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t:
2086; CHECK:       # %bb.0: # %entry
2087; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
2088; CHECK-NEXT:    vsseg5e64.v v8, (a0)
2089; CHECK-NEXT:    ret
2090entry:
2091  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, i64 6)
2092  ret void
2093}
2094
2095define void @test_vsseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2096; CHECK-LABEL: test_vsseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t:
2097; CHECK:       # %bb.0: # %entry
2098; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
2099; CHECK-NEXT:    vsseg5e64.v v8, (a0), v0.t
2100; CHECK-NEXT:    ret
2101entry:
2102  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
2103  ret void
2104}
2105
2106declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 1 x i1>, i64, i64)
2107
2108define void @test_vsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl) {
2109; CHECK-LABEL: test_vsseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t:
2110; CHECK:       # %bb.0: # %entry
2111; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
2112; CHECK-NEXT:    vsseg6e64.v v8, (a0)
2113; CHECK-NEXT:    ret
2114entry:
2115  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, i64 6)
2116  ret void
2117}
2118
2119define void @test_vsseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2120; CHECK-LABEL: test_vsseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t:
2121; CHECK:       # %bb.0: # %entry
2122; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
2123; CHECK-NEXT:    vsseg6e64.v v8, (a0), v0.t
2124; CHECK-NEXT:    ret
2125entry:
2126  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
2127  ret void
2128}
2129
2130declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i1>, i64, i64)
2131
2132define void @test_vsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl) {
2133; CHECK-LABEL: test_vsseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t:
2134; CHECK:       # %bb.0: # %entry
2135; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
2136; CHECK-NEXT:    vsseg7e64.v v8, (a0)
2137; CHECK-NEXT:    ret
2138entry:
2139  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, i64 6)
2140  ret void
2141}
2142
2143define void @test_vsseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2144; CHECK-LABEL: test_vsseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t:
2145; CHECK:       # %bb.0: # %entry
2146; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
2147; CHECK-NEXT:    vsseg7e64.v v8, (a0), v0.t
2148; CHECK-NEXT:    ret
2149entry:
2150  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
2151  ret void
2152}
2153
2154declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i1>, i64, i64)
2155
2156define void @test_vsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl) {
2157; CHECK-LABEL: test_vsseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t:
2158; CHECK:       # %bb.0: # %entry
2159; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
2160; CHECK-NEXT:    vsseg8e64.v v8, (a0)
2161; CHECK-NEXT:    ret
2162entry:
2163  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, i64 6)
2164  ret void
2165}
2166
2167define void @test_vsseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2168; CHECK-LABEL: test_vsseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t:
2169; CHECK:       # %bb.0: # %entry
2170; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
2171; CHECK-NEXT:    vsseg8e64.v v8, (a0), v0.t
2172; CHECK-NEXT:    ret
2173entry:
2174  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
2175  ret void
2176}
2177
2178
2179define void @test_vsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl) {
2180; CHECK-LABEL: test_vsseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t:
2181; CHECK:       # %bb.0: # %entry
2182; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2183; CHECK-NEXT:    vsseg2e16.v v8, (a0)
2184; CHECK-NEXT:    ret
2185entry:
2186  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, i64 4)
2187  ret void
2188}
2189
2190define void @test_vsseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2191; CHECK-LABEL: test_vsseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t:
2192; CHECK:       # %bb.0: # %entry
2193; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2194; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
2195; CHECK-NEXT:    ret
2196entry:
2197  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
2198  ret void
2199}
2200
2201
2202define void @test_vsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl) {
2203; CHECK-LABEL: test_vsseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t:
2204; CHECK:       # %bb.0: # %entry
2205; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2206; CHECK-NEXT:    vsseg2e16.v v8, (a0)
2207; CHECK-NEXT:    ret
2208entry:
2209  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, i64 4)
2210  ret void
2211}
2212
2213define void @test_vsseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
2214; CHECK-LABEL: test_vsseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t:
2215; CHECK:       # %bb.0: # %entry
2216; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2217; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
2218; CHECK-NEXT:    ret
2219entry:
2220  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
2221  ret void
2222}
2223
2224
2225define void @test_vsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl) {
2226; CHECK-LABEL: test_vsseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t:
2227; CHECK:       # %bb.0: # %entry
2228; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2229; CHECK-NEXT:    vsseg2e16.v v8, (a0)
2230; CHECK-NEXT:    ret
2231entry:
2232  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, i64 4)
2233  ret void
2234}
2235
2236define void @test_vsseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
2237; CHECK-LABEL: test_vsseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t:
2238; CHECK:       # %bb.0: # %entry
2239; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2240; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
2241; CHECK-NEXT:    ret
2242entry:
2243  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
2244  ret void
2245}
2246
2247
2248define void @test_vsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl) {
2249; CHECK-LABEL: test_vsseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t:
2250; CHECK:       # %bb.0: # %entry
2251; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
2252; CHECK-NEXT:    vsseg2e16.v v8, (a0)
2253; CHECK-NEXT:    ret
2254entry:
2255  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, i64 4)
2256  ret void
2257}
2258
2259define void @test_vsseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
2260; CHECK-LABEL: test_vsseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t:
2261; CHECK:       # %bb.0: # %entry
2262; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
2263; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
2264; CHECK-NEXT:    ret
2265entry:
2266  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
2267  ret void
2268}
2269
2270
2271define void @test_vsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl) {
2272; CHECK-LABEL: test_vsseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t:
2273; CHECK:       # %bb.0: # %entry
2274; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
2275; CHECK-NEXT:    vsseg2e16.v v8, (a0)
2276; CHECK-NEXT:    ret
2277entry:
2278  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, i64 4)
2279  ret void
2280}
2281
2282define void @test_vsseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
2283; CHECK-LABEL: test_vsseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t:
2284; CHECK:       # %bb.0: # %entry
2285; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
2286; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
2287; CHECK-NEXT:    ret
2288entry:
2289  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 4)
2290  ret void
2291}
2292
2293
2294define void @test_vsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl) {
2295; CHECK-LABEL: test_vsseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t:
2296; CHECK:       # %bb.0: # %entry
2297; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2298; CHECK-NEXT:    vsseg3e16.v v8, (a0)
2299; CHECK-NEXT:    ret
2300entry:
2301  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, i64 4)
2302  ret void
2303}
2304
2305define void @test_vsseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2306; CHECK-LABEL: test_vsseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t:
2307; CHECK:       # %bb.0: # %entry
2308; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2309; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
2310; CHECK-NEXT:    ret
2311entry:
2312  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
2313  ret void
2314}
2315
2316
2317define void @test_vsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl) {
2318; CHECK-LABEL: test_vsseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t:
2319; CHECK:       # %bb.0: # %entry
2320; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2321; CHECK-NEXT:    vsseg3e16.v v8, (a0)
2322; CHECK-NEXT:    ret
2323entry:
2324  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, i64 4)
2325  ret void
2326}
2327
2328define void @test_vsseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
2329; CHECK-LABEL: test_vsseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t:
2330; CHECK:       # %bb.0: # %entry
2331; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2332; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
2333; CHECK-NEXT:    ret
2334entry:
2335  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
2336  ret void
2337}
2338
2339
2340define void @test_vsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl) {
2341; CHECK-LABEL: test_vsseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t:
2342; CHECK:       # %bb.0: # %entry
2343; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2344; CHECK-NEXT:    vsseg3e16.v v8, (a0)
2345; CHECK-NEXT:    ret
2346entry:
2347  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, i64 4)
2348  ret void
2349}
2350
2351define void @test_vsseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
2352; CHECK-LABEL: test_vsseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t:
2353; CHECK:       # %bb.0: # %entry
2354; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2355; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
2356; CHECK-NEXT:    ret
2357entry:
2358  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
2359  ret void
2360}
2361
2362
2363define void @test_vsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl) {
2364; CHECK-LABEL: test_vsseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t:
2365; CHECK:       # %bb.0: # %entry
2366; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
2367; CHECK-NEXT:    vsseg3e16.v v8, (a0)
2368; CHECK-NEXT:    ret
2369entry:
2370  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, i64 4)
2371  ret void
2372}
2373
2374define void @test_vsseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
2375; CHECK-LABEL: test_vsseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t:
2376; CHECK:       # %bb.0: # %entry
2377; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
2378; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
2379; CHECK-NEXT:    ret
2380entry:
2381  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
2382  ret void
2383}
2384
2385
2386define void @test_vsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl) {
2387; CHECK-LABEL: test_vsseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t:
2388; CHECK:       # %bb.0: # %entry
2389; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2390; CHECK-NEXT:    vsseg4e16.v v8, (a0)
2391; CHECK-NEXT:    ret
2392entry:
2393  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, i64 4)
2394  ret void
2395}
2396
2397define void @test_vsseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2398; CHECK-LABEL: test_vsseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t:
2399; CHECK:       # %bb.0: # %entry
2400; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2401; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
2402; CHECK-NEXT:    ret
2403entry:
2404  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
2405  ret void
2406}
2407
2408
2409define void @test_vsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl) {
2410; CHECK-LABEL: test_vsseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t:
2411; CHECK:       # %bb.0: # %entry
2412; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2413; CHECK-NEXT:    vsseg4e16.v v8, (a0)
2414; CHECK-NEXT:    ret
2415entry:
2416  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, i64 4)
2417  ret void
2418}
2419
2420define void @test_vsseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
2421; CHECK-LABEL: test_vsseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t:
2422; CHECK:       # %bb.0: # %entry
2423; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2424; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
2425; CHECK-NEXT:    ret
2426entry:
2427  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
2428  ret void
2429}
2430
2431
2432define void @test_vsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl) {
2433; CHECK-LABEL: test_vsseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t:
2434; CHECK:       # %bb.0: # %entry
2435; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2436; CHECK-NEXT:    vsseg4e16.v v8, (a0)
2437; CHECK-NEXT:    ret
2438entry:
2439  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, i64 4)
2440  ret void
2441}
2442
2443define void @test_vsseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
2444; CHECK-LABEL: test_vsseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t:
2445; CHECK:       # %bb.0: # %entry
2446; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2447; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
2448; CHECK-NEXT:    ret
2449entry:
2450  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
2451  ret void
2452}
2453
2454
2455define void @test_vsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl) {
2456; CHECK-LABEL: test_vsseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t:
2457; CHECK:       # %bb.0: # %entry
2458; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
2459; CHECK-NEXT:    vsseg4e16.v v8, (a0)
2460; CHECK-NEXT:    ret
2461entry:
2462  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, i64 4)
2463  ret void
2464}
2465
2466define void @test_vsseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
2467; CHECK-LABEL: test_vsseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t:
2468; CHECK:       # %bb.0: # %entry
2469; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
2470; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
2471; CHECK-NEXT:    ret
2472entry:
2473  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
2474  ret void
2475}
2476
2477
2478define void @test_vsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl) {
2479; CHECK-LABEL: test_vsseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t:
2480; CHECK:       # %bb.0: # %entry
2481; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2482; CHECK-NEXT:    vsseg5e16.v v8, (a0)
2483; CHECK-NEXT:    ret
2484entry:
2485  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, i64 4)
2486  ret void
2487}
2488
2489define void @test_vsseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2490; CHECK-LABEL: test_vsseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t:
2491; CHECK:       # %bb.0: # %entry
2492; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2493; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
2494; CHECK-NEXT:    ret
2495entry:
2496  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
2497  ret void
2498}
2499
2500
2501define void @test_vsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl) {
2502; CHECK-LABEL: test_vsseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t:
2503; CHECK:       # %bb.0: # %entry
2504; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2505; CHECK-NEXT:    vsseg5e16.v v8, (a0)
2506; CHECK-NEXT:    ret
2507entry:
2508  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, i64 4)
2509  ret void
2510}
2511
2512define void @test_vsseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
2513; CHECK-LABEL: test_vsseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t:
2514; CHECK:       # %bb.0: # %entry
2515; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2516; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
2517; CHECK-NEXT:    ret
2518entry:
2519  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
2520  ret void
2521}
2522
2523
2524define void @test_vsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl) {
2525; CHECK-LABEL: test_vsseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t:
2526; CHECK:       # %bb.0: # %entry
2527; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2528; CHECK-NEXT:    vsseg5e16.v v8, (a0)
2529; CHECK-NEXT:    ret
2530entry:
2531  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, i64 4)
2532  ret void
2533}
2534
2535define void @test_vsseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
2536; CHECK-LABEL: test_vsseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t:
2537; CHECK:       # %bb.0: # %entry
2538; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2539; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
2540; CHECK-NEXT:    ret
2541entry:
2542  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
2543  ret void
2544}
2545
2546
2547define void @test_vsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl) {
2548; CHECK-LABEL: test_vsseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t:
2549; CHECK:       # %bb.0: # %entry
2550; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2551; CHECK-NEXT:    vsseg6e16.v v8, (a0)
2552; CHECK-NEXT:    ret
2553entry:
2554  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, i64 4)
2555  ret void
2556}
2557
2558define void @test_vsseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2559; CHECK-LABEL: test_vsseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t:
2560; CHECK:       # %bb.0: # %entry
2561; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2562; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
2563; CHECK-NEXT:    ret
2564entry:
2565  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
2566  ret void
2567}
2568
2569
2570define void @test_vsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl) {
2571; CHECK-LABEL: test_vsseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t:
2572; CHECK:       # %bb.0: # %entry
2573; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2574; CHECK-NEXT:    vsseg6e16.v v8, (a0)
2575; CHECK-NEXT:    ret
2576entry:
2577  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, i64 4)
2578  ret void
2579}
2580
2581define void @test_vsseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
2582; CHECK-LABEL: test_vsseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t:
2583; CHECK:       # %bb.0: # %entry
2584; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2585; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
2586; CHECK-NEXT:    ret
2587entry:
2588  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
2589  ret void
2590}
2591
2592
2593define void @test_vsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl) {
2594; CHECK-LABEL: test_vsseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t:
2595; CHECK:       # %bb.0: # %entry
2596; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2597; CHECK-NEXT:    vsseg6e16.v v8, (a0)
2598; CHECK-NEXT:    ret
2599entry:
2600  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, i64 4)
2601  ret void
2602}
2603
2604define void @test_vsseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
2605; CHECK-LABEL: test_vsseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t:
2606; CHECK:       # %bb.0: # %entry
2607; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2608; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
2609; CHECK-NEXT:    ret
2610entry:
2611  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
2612  ret void
2613}
2614
2615
2616define void @test_vsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl) {
2617; CHECK-LABEL: test_vsseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t:
2618; CHECK:       # %bb.0: # %entry
2619; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2620; CHECK-NEXT:    vsseg7e16.v v8, (a0)
2621; CHECK-NEXT:    ret
2622entry:
2623  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, i64 4)
2624  ret void
2625}
2626
2627define void @test_vsseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2628; CHECK-LABEL: test_vsseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t:
2629; CHECK:       # %bb.0: # %entry
2630; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2631; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
2632; CHECK-NEXT:    ret
2633entry:
2634  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
2635  ret void
2636}
2637
2638
2639define void @test_vsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl) {
2640; CHECK-LABEL: test_vsseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t:
2641; CHECK:       # %bb.0: # %entry
2642; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2643; CHECK-NEXT:    vsseg7e16.v v8, (a0)
2644; CHECK-NEXT:    ret
2645entry:
2646  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, i64 4)
2647  ret void
2648}
2649
2650define void @test_vsseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
2651; CHECK-LABEL: test_vsseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t:
2652; CHECK:       # %bb.0: # %entry
2653; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2654; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
2655; CHECK-NEXT:    ret
2656entry:
2657  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
2658  ret void
2659}
2660
2661
2662define void @test_vsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl) {
2663; CHECK-LABEL: test_vsseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t:
2664; CHECK:       # %bb.0: # %entry
2665; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2666; CHECK-NEXT:    vsseg7e16.v v8, (a0)
2667; CHECK-NEXT:    ret
2668entry:
2669  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, i64 4)
2670  ret void
2671}
2672
2673define void @test_vsseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
2674; CHECK-LABEL: test_vsseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t:
2675; CHECK:       # %bb.0: # %entry
2676; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2677; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
2678; CHECK-NEXT:    ret
2679entry:
2680  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
2681  ret void
2682}
2683
2684
2685define void @test_vsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl) {
2686; CHECK-LABEL: test_vsseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t:
2687; CHECK:       # %bb.0: # %entry
2688; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2689; CHECK-NEXT:    vsseg8e16.v v8, (a0)
2690; CHECK-NEXT:    ret
2691entry:
2692  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, i64 4)
2693  ret void
2694}
2695
2696define void @test_vsseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2697; CHECK-LABEL: test_vsseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t:
2698; CHECK:       # %bb.0: # %entry
2699; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2700; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
2701; CHECK-NEXT:    ret
2702entry:
2703  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
2704  ret void
2705}
2706
2707
2708define void @test_vsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl) {
2709; CHECK-LABEL: test_vsseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t:
2710; CHECK:       # %bb.0: # %entry
2711; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2712; CHECK-NEXT:    vsseg8e16.v v8, (a0)
2713; CHECK-NEXT:    ret
2714entry:
2715  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, i64 4)
2716  ret void
2717}
2718
2719define void @test_vsseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
2720; CHECK-LABEL: test_vsseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t:
2721; CHECK:       # %bb.0: # %entry
2722; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2723; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
2724; CHECK-NEXT:    ret
2725entry:
2726  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
2727  ret void
2728}
2729
2730
2731define void @test_vsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl) {
2732; CHECK-LABEL: test_vsseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t:
2733; CHECK:       # %bb.0: # %entry
2734; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2735; CHECK-NEXT:    vsseg8e16.v v8, (a0)
2736; CHECK-NEXT:    ret
2737entry:
2738  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, i64 4)
2739  ret void
2740}
2741
2742define void @test_vsseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
2743; CHECK-LABEL: test_vsseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t:
2744; CHECK:       # %bb.0: # %entry
2745; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2746; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
2747; CHECK-NEXT:    ret
2748entry:
2749  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
2750  ret void
2751}
2752
2753
2754define void @test_vsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl) {
2755; CHECK-LABEL: test_vsseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t:
2756; CHECK:       # %bb.0: # %entry
2757; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
2758; CHECK-NEXT:    vsseg2e32.v v8, (a0)
2759; CHECK-NEXT:    ret
2760entry:
2761  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, i64 5)
2762  ret void
2763}
2764
2765define void @test_vsseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2766; CHECK-LABEL: test_vsseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t:
2767; CHECK:       # %bb.0: # %entry
2768; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
2769; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
2770; CHECK-NEXT:    ret
2771entry:
2772  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
2773  ret void
2774}
2775
2776
2777define void @test_vsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl) {
2778; CHECK-LABEL: test_vsseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t:
2779; CHECK:       # %bb.0: # %entry
2780; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
2781; CHECK-NEXT:    vsseg2e32.v v8, (a0)
2782; CHECK-NEXT:    ret
2783entry:
2784  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, i64 5)
2785  ret void
2786}
2787
2788define void @test_vsseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
2789; CHECK-LABEL: test_vsseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t:
2790; CHECK:       # %bb.0: # %entry
2791; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
2792; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
2793; CHECK-NEXT:    ret
2794entry:
2795  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
2796  ret void
2797}
2798
2799
2800define void @test_vsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl) {
2801; CHECK-LABEL: test_vsseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t:
2802; CHECK:       # %bb.0: # %entry
2803; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
2804; CHECK-NEXT:    vsseg2e32.v v8, (a0)
2805; CHECK-NEXT:    ret
2806entry:
2807  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, i64 5)
2808  ret void
2809}
2810
2811define void @test_vsseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
2812; CHECK-LABEL: test_vsseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t:
2813; CHECK:       # %bb.0: # %entry
2814; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
2815; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
2816; CHECK-NEXT:    ret
2817entry:
2818  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 5)
2819  ret void
2820}
2821
2822
2823define void @test_vsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl) {
2824; CHECK-LABEL: test_vsseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t:
2825; CHECK:       # %bb.0: # %entry
2826; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
2827; CHECK-NEXT:    vsseg2e32.v v8, (a0)
2828; CHECK-NEXT:    ret
2829entry:
2830  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, i64 5)
2831  ret void
2832}
2833
2834define void @test_vsseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
2835; CHECK-LABEL: test_vsseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t:
2836; CHECK:       # %bb.0: # %entry
2837; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
2838; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
2839; CHECK-NEXT:    ret
2840entry:
2841  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 5)
2842  ret void
2843}
2844
2845
2846define void @test_vsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl) {
2847; CHECK-LABEL: test_vsseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t:
2848; CHECK:       # %bb.0: # %entry
2849; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
2850; CHECK-NEXT:    vsseg3e32.v v8, (a0)
2851; CHECK-NEXT:    ret
2852entry:
2853  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, i64 5)
2854  ret void
2855}
2856
2857define void @test_vsseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2858; CHECK-LABEL: test_vsseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t:
2859; CHECK:       # %bb.0: # %entry
2860; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
2861; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
2862; CHECK-NEXT:    ret
2863entry:
2864  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
2865  ret void
2866}
2867
2868
2869define void @test_vsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl) {
2870; CHECK-LABEL: test_vsseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t:
2871; CHECK:       # %bb.0: # %entry
2872; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
2873; CHECK-NEXT:    vsseg3e32.v v8, (a0)
2874; CHECK-NEXT:    ret
2875entry:
2876  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, i64 5)
2877  ret void
2878}
2879
2880define void @test_vsseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
2881; CHECK-LABEL: test_vsseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t:
2882; CHECK:       # %bb.0: # %entry
2883; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
2884; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
2885; CHECK-NEXT:    ret
2886entry:
2887  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
2888  ret void
2889}
2890
2891
2892define void @test_vsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl) {
2893; CHECK-LABEL: test_vsseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t:
2894; CHECK:       # %bb.0: # %entry
2895; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
2896; CHECK-NEXT:    vsseg3e32.v v8, (a0)
2897; CHECK-NEXT:    ret
2898entry:
2899  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, i64 5)
2900  ret void
2901}
2902
2903define void @test_vsseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
2904; CHECK-LABEL: test_vsseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t:
2905; CHECK:       # %bb.0: # %entry
2906; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
2907; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
2908; CHECK-NEXT:    ret
2909entry:
2910  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 5)
2911  ret void
2912}
2913
2914
2915define void @test_vsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl) {
2916; CHECK-LABEL: test_vsseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t:
2917; CHECK:       # %bb.0: # %entry
2918; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
2919; CHECK-NEXT:    vsseg4e32.v v8, (a0)
2920; CHECK-NEXT:    ret
2921entry:
2922  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, i64 5)
2923  ret void
2924}
2925
2926define void @test_vsseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
2927; CHECK-LABEL: test_vsseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t:
2928; CHECK:       # %bb.0: # %entry
2929; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
2930; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
2931; CHECK-NEXT:    ret
2932entry:
2933  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
2934  ret void
2935}
2936
2937
2938define void @test_vsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl) {
2939; CHECK-LABEL: test_vsseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t:
2940; CHECK:       # %bb.0: # %entry
2941; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg4e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, i64 5)
  ret void
}

define void @test_vsseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsseg4e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, i64 5)
  ret void
}

define void @test_vsseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg5e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, i64 5)
  ret void
}

define void @test_vsseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg5e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg5e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, i64 5)
  ret void
}

define void @test_vsseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg5e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg6e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, i64 5)
  ret void
}

define void @test_vsseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg6e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg6e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, i64 5)
  ret void
}

define void @test_vsseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg6e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg7e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, i64 5)
  ret void
}

define void @test_vsseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg7e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg7e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, i64 5)
  ret void
}

define void @test_vsseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg7e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
  ret void
}

define void @test_vsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg8e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, i64 5)
  ret void
}

define void @test_vsseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg8e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg8e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, i64 5)
  ret void
}

define void @test_vsseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg8e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
  ret void
}


define void @test_vsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg2e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, i64 6)
  ret void
}

define void @test_vsseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsseg2e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, i64 6)
  ret void
}

define void @test_vsseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsseg2e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, i64 6)
  ret void
}

define void @test_vsseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg3e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, i64 6)
  ret void
}

define void @test_vsseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg3e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsseg3e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, i64 6)
  ret void
}

define void @test_vsseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsseg3e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
  ret void
}

define void @test_vsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg4e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, i64 6)
  ret void
}

define void @test_vsseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg4e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsseg4e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, i64 6)
  ret void
}

define void @test_vsseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsseg4e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg5e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, i64 6)
  ret void
}

define void @test_vsseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg5e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg6e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, i64 6)
  ret void
}

define void @test_vsseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg6e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg7e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, i64 6)
  ret void
}

define void @test_vsseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg7e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}


define void @test_vsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg8e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, i64 6)
  ret void
}

define void @test_vsseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg8e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}

define void @test_vsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg5e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg5e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg5e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg6e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg6e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg6e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg7e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg7e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg7e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg8e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg8e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}


define void @test_vsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg8e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, i64 4)
  ret void
}

define void @test_vsseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}
