// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
// RUN:   -target-feature +zvfbfmin \
// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
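
// These tests cover the bfloat16 (__bf16) variants of the vlseg3e16ff
// intrinsics: unit-stride, fault-only-first loads of 3-field segments at
// LMUL mf4 through m2, exercising the tu, tum, tumu, and mu policy suffixes.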

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vlseg3e16ff_v_bf16mf4x3_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv2i8_3t.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP1]]
//
vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd,
                                                 const __bf16 *rs1,
                                                 size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vlseg3e16ff_v_bf16mf2x3_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv4i8_3t.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP1]]
//
vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd,
                                                 const __bf16 *rs1,
                                                 size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vlseg3e16ff_v_bf16m1x3_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv8i8_3t.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP1]]
//
vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tu(vbfloat16m1x3_t vd,
                                               const __bf16 *rs1,
                                               size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vlseg3e16ff_v_bf16m2x3_tu(
// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.triscv.vector.tuple_nxv16i8_3t.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP1]]
//
vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tu(vbfloat16m2x3_t vd,
                                               const __bf16 *rs1,
                                               size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vlseg3e16ff_v_bf16mf4x3_tum(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP1]]
//
vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tum(vbool64_t vm,
                                                  vbfloat16mf4x3_t vd,
                                                  const __bf16 *rs1,
                                                  size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vlseg3e16ff_v_bf16mf2x3_tum(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP1]]
//
vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tum(vbool32_t vm,
                                                  vbfloat16mf2x3_t vd,
                                                  const __bf16 *rs1,
                                                  size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vlseg3e16ff_v_bf16m1x3_tum(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP1]]
//
vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tum(vbool16_t vm,
                                                vbfloat16m1x3_t vd,
                                                const __bf16 *rs1,
                                                size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vlseg3e16ff_v_bf16m2x3_tum(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP1]]
//
vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd,
                                                const __bf16 *rs1,
                                                size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vlseg3e16ff_v_bf16mf4x3_tumu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP1]]
//
vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tumu(vbool64_t vm,
                                                   vbfloat16mf4x3_t vd,
                                                   const __bf16 *rs1,
                                                   size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vlseg3e16ff_v_bf16mf2x3_tumu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP1]]
//
vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tumu(vbool32_t vm,
                                                   vbfloat16mf2x3_t vd,
                                                   const __bf16 *rs1,
                                                   size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vlseg3e16ff_v_bf16m1x3_tumu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP1]]
//
vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tumu(vbool16_t vm,
                                                 vbfloat16m1x3_t vd,
                                                 const __bf16 *rs1,
                                                 size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vlseg3e16ff_v_bf16m2x3_tumu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP1]]
//
vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tumu(vbool8_t vm,
                                                 vbfloat16m2x3_t vd,
                                                 const __bf16 *rs1,
                                                 size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vlseg3e16ff_v_bf16mf4x3_mu(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv2i8_3t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 2 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP1]]
//
vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_mu(vbool64_t vm,
                                                 vbfloat16mf4x3_t vd,
                                                 const __bf16 *rs1,
                                                 size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vlseg3e16ff_v_bf16mf2x3_mu(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv4i8_3t.i64.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 4 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP1]]
//
vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_mu(vbool32_t vm,
                                                 vbfloat16mf2x3_t vd,
                                                 const __bf16 *rs1,
                                                 size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vlseg3e16ff_v_bf16m1x3_mu(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv8i8_3t.i64.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP1]]
//
vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd,
                                               const __bf16 *rs1,
                                               size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vlseg3e16ff_v_bf16m2x3_mu(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i64 } @llvm.riscv.vlseg3ff.mask.triscv.vector.tuple_nxv16i8_3t.i64.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 16 x i8>, 3), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT:    store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT:    ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP1]]
//
vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd,
                                               const __bf16 *rs1,
                                               size_t *new_vl, size_t vl) {
  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
}