// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
// RUN:   -target-feature +zvfbfmin \
// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

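// Test the bf16 tuple variants of the fault-only-first segment load intrinsic
// __riscv_vlseg2e16ff for the _tu, _tum, _tumu, and _mu policy overloads,
// checking that each lowers to the corresponding llvm.riscv.vlseg2ff[.mask]
// intrinsic with the expected tuple type and policy operand.
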
// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vlseg2e16ff_v_bf16mf4x2_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv2i8_2t.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP1]]
//
vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd,
                                                 const __bf16 *rs1,
                                                 size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vlseg2e16ff_v_bf16mf2x2_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 4 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv4i8_2t.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP1]]
//
vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd,
                                                 const __bf16 *rs1,
                                                 size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg2e16ff_v_bf16m1x2_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP1]]
//
vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tu(vbfloat16m1x2_t vd,
                                               const __bf16 *rs1,
                                               size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vlseg2e16ff_v_bf16m2x2_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv16i8_2t.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP1]]
//
vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tu(vbfloat16m2x2_t vd,
                                               const __bf16 *rs1,
                                               size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vlseg2e16ff_v_bf16m4x2_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP1]]
//
vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tu(vbfloat16m4x2_t vd,
                                               const __bf16 *rs1,
                                               size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vlseg2e16ff_v_bf16mf4x2_tum(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP1]]
//
vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tum(vbool64_t vm,
                                                  vbfloat16mf4x2_t vd,
                                                  const __bf16 *rs1,
                                                  size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vlseg2e16ff_v_bf16mf2x2_tum(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 4 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP1]]
//
vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tum(vbool32_t vm,
                                                  vbfloat16mf2x2_t vd,
                                                  const __bf16 *rs1,
                                                  size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg2e16ff_v_bf16m1x2_tum(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP1]]
//
vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tum(vbool16_t vm,
                                                vbfloat16m1x2_t vd,
                                                const __bf16 *rs1,
                                                size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vlseg2e16ff_v_bf16m2x2_tum(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP1]]
//
vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd,
                                                const __bf16 *rs1,
                                                size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vlseg2e16ff_v_bf16m4x2_tum(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP1]]
//
vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd,
                                                const __bf16 *rs1,
                                                size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vlseg2e16ff_v_bf16mf4x2_tumu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP1]]
//
vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tumu(vbool64_t vm,
                                                   vbfloat16mf4x2_t vd,
                                                   const __bf16 *rs1,
                                                   size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vlseg2e16ff_v_bf16mf2x2_tumu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 4 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP1]]
//
vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tumu(vbool32_t vm,
                                                   vbfloat16mf2x2_t vd,
                                                   const __bf16 *rs1,
                                                   size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg2e16ff_v_bf16m1x2_tumu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP1]]
//
vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tumu(vbool16_t vm,
                                                 vbfloat16m1x2_t vd,
                                                 const __bf16 *rs1,
                                                 size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vlseg2e16ff_v_bf16m2x2_tumu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP1]]
//
vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tumu(vbool8_t vm,
                                                 vbfloat16m2x2_t vd,
                                                 const __bf16 *rs1,
                                                 size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vlseg2e16ff_v_bf16m4x2_tumu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP1]]
//
vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tumu(vbool4_t vm,
                                                 vbfloat16m4x2_t vd,
                                                 const __bf16 *rs1,
                                                 size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vlseg2e16ff_v_bf16mf4x2_mu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv2i8_2t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP1]]
//
vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_mu(vbool64_t vm,
                                                 vbfloat16mf4x2_t vd,
                                                 const __bf16 *rs1,
                                                 size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vlseg2e16ff_v_bf16mf2x2_mu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 4 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv4i8_2t.i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP1]]
//
vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_mu(vbool32_t vm,
                                                 vbfloat16mf2x2_t vd,
                                                 const __bf16 *rs1,
                                                 size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg2e16ff_v_bf16m1x2_mu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t.i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP1]]
//
vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd,
                                               const __bf16 *rs1,
                                               size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vlseg2e16ff_v_bf16m2x2_mu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv16i8_2t.i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP1]]
//
vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd,
                                               const __bf16 *rs1,
                                               size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vlseg2e16ff_v_bf16m4x2_mu(
// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64 } @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.i64.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP1]]
//
vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd,
                                               const __bf16 *rs1,
                                               size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
}