// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
// RUN:   -target-feature +zvfh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
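
// The functions below exercise the unmasked and masked overloads of the
// __riscv_vslideup intrinsic, which copies elements of SRC into DEST starting
// at element index OFFSET; elements of DEST below OFFSET are left unchanged.
// One reading worth flagging as an assumption: the trailing `i64 3` operand in
// the generated @llvm.riscv.vslideup.* calls is the policy immediate,
// i.e. tail-agnostic | mask-agnostic under LLVM's usual vta/vma encoding.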

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vslideup_vx_f16mf4
// CHECK-RV64-SAME: (<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16.i64(<vscale x 1 x half> [[DEST]], <vscale x 1 x half> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vslideup_vx_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vslideup_vx_f16mf2
// CHECK-RV64-SAME: (<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16.i64(<vscale x 2 x half> [[DEST]], <vscale x 2 x half> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vslideup_vx_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vslideup_vx_f16m1
// CHECK-RV64-SAME: (<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16.i64(<vscale x 4 x half> [[DEST]], <vscale x 4 x half> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vslideup_vx_f16m1(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vslideup_vx_f16m2
// CHECK-RV64-SAME: (<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16.i64(<vscale x 8 x half> [[DEST]], <vscale x 8 x half> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vslideup_vx_f16m2(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vslideup_vx_f16m4
// CHECK-RV64-SAME: (<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16.i64(<vscale x 16 x half> [[DEST]], <vscale x 16 x half> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vslideup_vx_f16m4(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vslideup_vx_f16m8
// CHECK-RV64-SAME: (<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslideup.nxv32f16.i64(<vscale x 32 x half> [[DEST]], <vscale x 32 x half> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vslideup_vx_f16m8(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vslideup_vx_f32mf2
// CHECK-RV64-SAME: (<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DEST]], <vscale x 1 x float> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vslideup_vx_f32m1
// CHECK-RV64-SAME: (<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DEST]], <vscale x 2 x float> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vslideup_vx_f32m2
// CHECK-RV64-SAME: (<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DEST]], <vscale x 4 x float> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vslideup_vx_f32m4
// CHECK-RV64-SAME: (<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DEST]], <vscale x 8 x float> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vslideup_vx_f32m8
// CHECK-RV64-SAME: (<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DEST]], <vscale x 16 x float> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vslideup_vx_f64m1
// CHECK-RV64-SAME: (<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DEST]], <vscale x 1 x double> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vslideup_vx_f64m2
// CHECK-RV64-SAME: (<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DEST]], <vscale x 2 x double> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vslideup_vx_f64m4
// CHECK-RV64-SAME: (<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DEST]], <vscale x 4 x double> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vslideup_vx_f64m8
// CHECK-RV64-SAME: (<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DEST]], <vscale x 8 x double> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vslideup_vx_i8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DEST]], <vscale x 1 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vslideup_vx_i8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DEST]], <vscale x 2 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vslideup_vx_i8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DEST]], <vscale x 4 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vslideup_vx_i8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DEST]], <vscale x 8 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vslideup_vx_i8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DEST]], <vscale x 16 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vslideup_vx_i8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DEST]], <vscale x 32 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vslideup_vx_i8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DEST]], <vscale x 64 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vslideup_vx_i16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DEST]], <vscale x 1 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vslideup_vx_i16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DEST]], <vscale x 2 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vslideup_vx_i16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DEST]], <vscale x 4 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vslideup_vx_i16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DEST]], <vscale x 8 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vslideup_vx_i16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DEST]], <vscale x 16 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vslideup_vx_i16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DEST]], <vscale x 32 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vslideup_vx_i32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DEST]], <vscale x 1 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vslideup_vx_i32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DEST]], <vscale x 2 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vslideup_vx_i32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DEST]], <vscale x 4 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vslideup_vx_i32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DEST]], <vscale x 8 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vslideup_vx_i32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DEST]], <vscale x 16 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vslideup_vx_i64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DEST]], <vscale x 1 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vslideup_vx_i64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DEST]], <vscale x 2 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vslideup_vx_i64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DEST]], <vscale x 4 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vslideup_vx_i64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DEST]], <vscale x 8 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vslideup_vx_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DEST]], <vscale x 1 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vslideup_vx_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DEST]], <vscale x 2 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vslideup_vx_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DEST]], <vscale x 4 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vslideup_vx_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DEST]], <vscale x 8 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vslideup_vx_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DEST]], <vscale x 16 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vslideup_vx_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DEST]], <vscale x 32 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vslideup_vx_u8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DEST]], <vscale x 64 x i8> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vslideup_vx_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DEST]], <vscale x 1 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vslideup_vx_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DEST]], <vscale x 2 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vslideup_vx_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DEST]], <vscale x 4 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vslideup_vx_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DEST]], <vscale x 8 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vslideup_vx_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DEST]], <vscale x 16 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vslideup_vx_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DEST]], <vscale x 32 x i16> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vslideup_vx_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DEST]], <vscale x 1 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vslideup_vx_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DEST]], <vscale x 2 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vslideup_vx_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DEST]], <vscale x 4 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vslideup_vx_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DEST]], <vscale x 8 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vslideup_vx_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DEST]], <vscale x 16 x i32> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vslideup_vx_u64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DEST]], <vscale x 1 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vslideup_vx_u64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DEST]], <vscale x 2 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vslideup_vx_u64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DEST]], <vscale x 4 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vslideup_vx_u64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DEST]], <vscale x 8 x i64> [[SRC]], i64 [[OFFSET]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(dest, src, offset, vl);
}

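// The _m overloads below take a vbool mask as the first argument; the
// generated IR calls @llvm.riscv.vslideup.mask.* with the mask operand
// inserted before VL (same assumed policy immediate as above).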
600 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vslideup_vx_f16mf4_m
601 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
602 // CHECK-RV64-NEXT: entry:
603 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16.i64(<vscale x 1 x half> [[DEST]], <vscale x 1 x half> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
604 // CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
605 //
test_vslideup_vx_f16mf4_m(vbool64_t mask,vfloat16mf4_t dest,vfloat16mf4_t src,size_t offset,size_t vl)606 vfloat16mf4_t test_vslideup_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
607 return __riscv_vslideup(mask, dest, src, offset, vl);
608 }
609
610 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vslideup_vx_f16mf2_m
611 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
612 // CHECK-RV64-NEXT: entry:
613 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16.i64(<vscale x 2 x half> [[DEST]], <vscale x 2 x half> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
614 // CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
615 //
test_vslideup_vx_f16mf2_m(vbool32_t mask,vfloat16mf2_t dest,vfloat16mf2_t src,size_t offset,size_t vl)616 vfloat16mf2_t test_vslideup_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
617 return __riscv_vslideup(mask, dest, src, offset, vl);
618 }
619
620 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vslideup_vx_f16m1_m
621 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
622 // CHECK-RV64-NEXT: entry:
623 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16.i64(<vscale x 4 x half> [[DEST]], <vscale x 4 x half> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
624 // CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
625 //
test_vslideup_vx_f16m1_m(vbool16_t mask,vfloat16m1_t dest,vfloat16m1_t src,size_t offset,size_t vl)626 vfloat16m1_t test_vslideup_vx_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
627 return __riscv_vslideup(mask, dest, src, offset, vl);
628 }
629
630 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vslideup_vx_f16m2_m
631 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
632 // CHECK-RV64-NEXT: entry:
633 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16.i64(<vscale x 8 x half> [[DEST]], <vscale x 8 x half> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
634 // CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
635 //
test_vslideup_vx_f16m2_m(vbool8_t mask,vfloat16m2_t dest,vfloat16m2_t src,size_t offset,size_t vl)636 vfloat16m2_t test_vslideup_vx_f16m2_m(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
637 return __riscv_vslideup(mask, dest, src, offset, vl);
638 }
639
640 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vslideup_vx_f16m4_m
641 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
642 // CHECK-RV64-NEXT: entry:
643 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16.i64(<vscale x 16 x half> [[DEST]], <vscale x 16 x half> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
644 // CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
645 //
test_vslideup_vx_f16m4_m(vbool4_t mask,vfloat16m4_t dest,vfloat16m4_t src,size_t offset,size_t vl)646 vfloat16m4_t test_vslideup_vx_f16m4_m(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
647 return __riscv_vslideup(mask, dest, src, offset, vl);
648 }
649
650 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vslideup_vx_f16m8_m
651 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
652 // CHECK-RV64-NEXT: entry:
653 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslideup.mask.nxv32f16.i64(<vscale x 32 x half> [[DEST]], <vscale x 32 x half> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
654 // CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
655 //
test_vslideup_vx_f16m8_m(vbool2_t mask,vfloat16m8_t dest,vfloat16m8_t src,size_t offset,size_t vl)656 vfloat16m8_t test_vslideup_vx_f16m8_m(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
657 return __riscv_vslideup(mask, dest, src, offset, vl);
658 }
659
660 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vslideup_vx_f32mf2_m
661 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
662 // CHECK-RV64-NEXT: entry:
663 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i64(<vscale x 1 x float> [[DEST]], <vscale x 1 x float> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
664 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
665 //
test_vslideup_vx_f32mf2_m(vbool64_t mask,vfloat32mf2_t dest,vfloat32mf2_t src,size_t offset,size_t vl)666 vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
667 return __riscv_vslideup(mask, dest, src, offset, vl);
668 }
669
670 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vslideup_vx_f32m1_m
671 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
672 // CHECK-RV64-NEXT: entry:
673 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i64(<vscale x 2 x float> [[DEST]], <vscale x 2 x float> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
674 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
675 //
test_vslideup_vx_f32m1_m(vbool32_t mask,vfloat32m1_t dest,vfloat32m1_t src,size_t offset,size_t vl)676 vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
677 return __riscv_vslideup(mask, dest, src, offset, vl);
678 }
679
680 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vslideup_vx_f32m2_m
681 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
682 // CHECK-RV64-NEXT: entry:
683 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i64(<vscale x 4 x float> [[DEST]], <vscale x 4 x float> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vslideup_vx_f32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i64(<vscale x 8 x float> [[DEST]], <vscale x 8 x float> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vslideup_vx_f32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.mask.nxv16f32.i64(<vscale x 16 x float> [[DEST]], <vscale x 16 x float> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vslideup_vx_f64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i64(<vscale x 1 x double> [[DEST]], <vscale x 1 x double> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vslideup_vx_f64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i64(<vscale x 2 x double> [[DEST]], <vscale x 2 x double> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vslideup_vx_f64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i64(<vscale x 4 x double> [[DEST]], <vscale x 4 x double> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vslideup_vx_f64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.mask.nxv8f64.i64(<vscale x 8 x double> [[DEST]], <vscale x 8 x double> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vslideup_vx_i8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DEST]], <vscale x 1 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vslideup_vx_i8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DEST]], <vscale x 2 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vslideup_vx_i8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DEST]], <vscale x 4 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vslideup_vx_i8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DEST]], <vscale x 8 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vslideup_vx_i8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DEST]], <vscale x 16 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vslideup_vx_i8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DEST]], <vscale x 32 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vslideup_vx_i8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DEST]], <vscale x 64 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vslideup_vx_i16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DEST]], <vscale x 1 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vslideup_vx_i16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DEST]], <vscale x 2 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vslideup_vx_i16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DEST]], <vscale x 4 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vslideup_vx_i16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DEST]], <vscale x 8 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vslideup_vx_i16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DEST]], <vscale x 16 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vslideup_vx_i16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DEST]], <vscale x 32 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vslideup_vx_i32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DEST]], <vscale x 1 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vslideup_vx_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DEST]], <vscale x 2 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vslideup_vx_i32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DEST]], <vscale x 4 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vslideup_vx_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DEST]], <vscale x 8 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vslideup_vx_i32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DEST]], <vscale x 16 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vslideup_vx_i64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DEST]], <vscale x 1 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vslideup_vx_i64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DEST]], <vscale x 2 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vslideup_vx_i64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DEST]], <vscale x 4 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vslideup_vx_i64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DEST]], <vscale x 8 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vslideup_vx_u8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DEST]], <vscale x 1 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vslideup_vx_u8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DEST]], <vscale x 2 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vslideup_vx_u8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DEST]], <vscale x 4 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vslideup_vx_u8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DEST]], <vscale x 8 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vslideup_vx_u8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DEST]], <vscale x 16 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vslideup_vx_u8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DEST]], <vscale x 32 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vslideup_vx_u8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DEST]], <vscale x 64 x i8> [[SRC]], i64 [[OFFSET]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vslideup_vx_u16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DEST]], <vscale x 1 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vslideup_vx_u16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DEST]], <vscale x 2 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vslideup_vx_u16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DEST]], <vscale x 4 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vslideup_vx_u16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DEST]], <vscale x 8 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vslideup_vx_u16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DEST]], <vscale x 16 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vslideup_vx_u16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DEST]], <vscale x 32 x i16> [[SRC]], i64 [[OFFSET]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vslideup_vx_u32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DEST]], <vscale x 1 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vslideup_vx_u32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DEST]], <vscale x 2 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vslideup_vx_u32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DEST]], <vscale x 4 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vslideup_vx_u32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DEST]], <vscale x 8 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vslideup_vx_u32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DEST]], <vscale x 16 x i32> [[SRC]], i64 [[OFFSET]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vslideup_vx_u64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DEST]], <vscale x 1 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vslideup_vx_u64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DEST]], <vscale x 2 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vslideup_vx_u64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DEST]], <vscale x 4 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vslideup_vx_u64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DEST]], <vscale x 8 x i64> [[SRC]], i64 [[OFFSET]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
  return __riscv_vslideup(mask, dest, src, offset, vl);
}
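
// A minimal usage sketch, illustrative only: the helper below is hypothetical
// and has no autogenerated CHECK lines. As the checks above show, the masked
// __riscv_vslideup overload lowers to @llvm.riscv.vslideup.mask.* with the
// mask operand placed before VL and a trailing policy immediate of 3.
static inline vint32m1_t slideup_by_one_i32m1_m(vbool32_t mask, vint32m1_t dest,
                                                vint32m1_t src, size_t vl) {
  // Same overloaded intrinsic as test_vslideup_vx_i32m1_m, with the offset
  // fixed to 1: for active lanes, element i of src lands at element i + 1 of
  // the result, while elements below the offset are taken from dest.
  return __riscv_vslideup(mask, dest, src, 1, vl);
}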