// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
// RUN:   -target-feature +zvfh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

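// This test exercises the __riscv_vfslide1down intrinsic (slide the source
// vector down by one element and insert the scalar `value` at the top) for
// every floating-point element type (f16, f32, f64) and LMUL, across the
// _tu/_tum/_tumu/_mu tail/mask policy variants. Each variant's policy is
// visible in the IR as the trailing i64 operand of the masked intrinsic.

// _tu variants: unmasked, tail undisturbed; these lower to the non-masked
// intrinsic with maskedoff as the passthru that supplies the undisturbed tail.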
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfslide1down_vf_f16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], half [[VALUE]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1down_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfslide1down_vf_f16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], half [[VALUE]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1down_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfslide1down_vf_f16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], half [[VALUE]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1down_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfslide1down_vf_f16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], half [[VALUE]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1down_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfslide1down_vf_f16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], half [[VALUE]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1down_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfslide1down_vf_f16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[SRC]], half [[VALUE]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1down_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfslide1down_vf_f32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], float [[VALUE]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) {
  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfslide1down_vf_f32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], float [[VALUE]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) {
  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfslide1down_vf_f32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], float [[VALUE]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) {
  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfslide1down_vf_f32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], float [[VALUE]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) {
  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfslide1down_vf_f32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[SRC]], float [[VALUE]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) {
  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfslide1down_vf_f64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[SRC]], double [[VALUE]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) {
  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfslide1down_vf_f64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[SRC]], double [[VALUE]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) {
  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfslide1down_vf_f64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[SRC]], double [[VALUE]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) {
  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfslide1down_vf_f64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[SRC]], double [[VALUE]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) {
  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
}

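// _tum variants: masked, tail undisturbed, mask agnostic (policy operand 2).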
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfslide1down_vf_f16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], half [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1down_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfslide1down_vf_f16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], half [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1down_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfslide1down_vf_f16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], half [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1down_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfslide1down_vf_f16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], half [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1down_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfslide1down_vf_f16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], half [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1down_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfslide1down_vf_f16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[SRC]], half [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1down_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfslide1down_vf_f32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], float [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) {
  return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfslide1down_vf_f32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], float [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) {
  return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfslide1down_vf_f32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], float [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) {
  return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfslide1down_vf_f32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], float [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) {
  return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfslide1down_vf_f32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[SRC]], float [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) {
  return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfslide1down_vf_f64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[SRC]], double [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) {
  return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfslide1down_vf_f64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[SRC]], double [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) {
  return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfslide1down_vf_f64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[SRC]], double [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) {
  return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfslide1down_vf_f64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[SRC]], double [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) {
  return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
}

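// _tumu variants: masked, tail undisturbed, mask undisturbed (policy operand 0).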
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfslide1down_vf_f16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], half [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1down_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfslide1down_vf_f16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], half [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1down_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfslide1down_vf_f16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], half [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1down_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfslide1down_vf_f16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], half [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1down_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfslide1down_vf_f16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], half [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1down_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfslide1down_vf_f16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[SRC]], half [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1down_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfslide1down_vf_f32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], float [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) {
  return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfslide1down_vf_f32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], float [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) {
  return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfslide1down_vf_f32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], float [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) {
  return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfslide1down_vf_f32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], float [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) {
  return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfslide1down_vf_f32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[SRC]], float [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) {
  return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfslide1down_vf_f64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[SRC]], double [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) {
  return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfslide1down_vf_f64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[SRC]], double [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) {
  return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfslide1down_vf_f64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[SRC]], double [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) {
  return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfslide1down_vf_f64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[SRC]], double [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) {
  return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
}

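// _mu variants: masked, tail agnostic, mask undisturbed (policy operand 1).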
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfslide1down_vf_f16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], half [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfslide1down_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfslide1down_vf_f16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], half [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfslide1down_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfslide1down_vf_f16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], half [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfslide1down_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfslide1down_vf_f16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], half [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfslide1down_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfslide1down_vf_f16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], half [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfslide1down_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfslide1down_vf_f16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], half noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[SRC]], half [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfslide1down_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) {
  return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfslide1down_vf_f32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], float [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) {
  return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfslide1down_vf_f32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], float [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) {
  return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfslide1down_vf_f32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], float [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) {
  return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfslide1down_vf_f32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], float [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) {
  return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfslide1down_vf_f32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[SRC]], float [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) {
  return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfslide1down_vf_f64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[SRC]], double [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) {
  return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfslide1down_vf_f64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[SRC]], double [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) {
  return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfslide1down_vf_f64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[SRC]], double [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) {
  return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfslide1down_vf_f64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[SRC]], double [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) {
  return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
}