xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splat.ll (revision 97982a8c605fac7c86d02e641a6cd7898b3ca343)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
2; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
3; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
4
; <1 x i8> VP splat: expect a single unmasked vmv.v.x with AVL taken from
; %evl (a1) at SEW=e8, LMUL=mf8. The mask %m does not appear in the checked
; output (the splat is lowered unmasked).
5define <1 x i8> @vp_splat_v1i8(i8 %val, <1 x i1> %m, i32 zeroext %evl) {
6; CHECK-LABEL: vp_splat_v1i8:
7; CHECK:       # %bb.0:
8; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
9; CHECK-NEXT:    vmv.v.x v8, a0
10; CHECK-NEXT:    ret
11  %splat = call <1 x i8> @llvm.experimental.vp.splat.v1i8(i8 %val, <1 x i1> %m, i32 %evl)
12  ret <1 x i8> %splat
13}
14
; <2 x i8>: same container as the 1-element case — e8/mf8, AVL=%evl (a1).
15define <2 x i8> @vp_splat_v2i8(i8 %val, <2 x i1> %m, i32 zeroext %evl) {
16; CHECK-LABEL: vp_splat_v2i8:
17; CHECK:       # %bb.0:
18; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
19; CHECK-NEXT:    vmv.v.x v8, a0
20; CHECK-NEXT:    ret
21  %splat = call <2 x i8> @llvm.experimental.vp.splat.v2i8(i8 %val, <2 x i1> %m, i32 %evl)
22  ret <2 x i8> %splat
23}
24
; <4 x i8>: e8/mf4, AVL=%evl (a1).
25define <4 x i8> @vp_splat_v4i8(i8 %val, <4 x i1> %m, i32 zeroext %evl) {
26; CHECK-LABEL: vp_splat_v4i8:
27; CHECK:       # %bb.0:
28; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
29; CHECK-NEXT:    vmv.v.x v8, a0
30; CHECK-NEXT:    ret
31  %splat = call <4 x i8> @llvm.experimental.vp.splat.v4i8(i8 %val, <4 x i1> %m, i32 %evl)
32  ret <4 x i8> %splat
33}
34
; <8 x i8>: e8/mf2, AVL=%evl (a1).
35define <8 x i8> @vp_splat_v8i8(i8 %val, <8 x i1> %m, i32 zeroext %evl) {
36; CHECK-LABEL: vp_splat_v8i8:
37; CHECK:       # %bb.0:
38; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
39; CHECK-NEXT:    vmv.v.x v8, a0
40; CHECK-NEXT:    ret
41  %splat = call <8 x i8> @llvm.experimental.vp.splat.v8i8(i8 %val, <8 x i1> %m, i32 %evl)
42  ret <8 x i8> %splat
43}
44
; <16 x i8>: fills one whole register at the minimum VLEN — e8/m1, AVL=%evl (a1).
45define <16 x i8> @vp_splat_v16i8(i8 %val, <16 x i1> %m, i32 zeroext %evl) {
46; CHECK-LABEL: vp_splat_v16i8:
47; CHECK:       # %bb.0:
48; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
49; CHECK-NEXT:    vmv.v.x v8, a0
50; CHECK-NEXT:    ret
51  %splat = call <16 x i8> @llvm.experimental.vp.splat.v16i8(i8 %val, <16 x i1> %m, i32 %evl)
52  ret <16 x i8> %splat
53}
54
; <32 x i8>: e8/m2, AVL=%evl (a1).
55define <32 x i8> @vp_splat_v32i8(i8 %val, <32 x i1> %m, i32 zeroext %evl) {
56; CHECK-LABEL: vp_splat_v32i8:
57; CHECK:       # %bb.0:
58; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
59; CHECK-NEXT:    vmv.v.x v8, a0
60; CHECK-NEXT:    ret
61  %splat = call <32 x i8> @llvm.experimental.vp.splat.v32i8(i8 %val, <32 x i1> %m, i32 %evl)
62  ret <32 x i8> %splat
63}
64
; <64 x i8>: e8/m4, AVL=%evl (a1).
65define <64 x i8> @vp_splat_v64i8(i8 %val, <64 x i1> %m, i32 zeroext %evl) {
66; CHECK-LABEL: vp_splat_v64i8:
67; CHECK:       # %bb.0:
68; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
69; CHECK-NEXT:    vmv.v.x v8, a0
70; CHECK-NEXT:    ret
71  %splat = call <64 x i8> @llvm.experimental.vp.splat.v64i8(i8 %val, <64 x i1> %m, i32 %evl)
72  ret <64 x i8> %splat
73}
74
; <1 x i16>: e16/mf4, AVL=%evl (a1).
75define <1 x i16> @vp_splat_v1i16(i16 %val, <1 x i1> %m, i32 zeroext %evl) {
76; CHECK-LABEL: vp_splat_v1i16:
77; CHECK:       # %bb.0:
78; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
79; CHECK-NEXT:    vmv.v.x v8, a0
80; CHECK-NEXT:    ret
81  %splat = call <1 x i16> @llvm.experimental.vp.splat.v1i16(i16 %val, <1 x i1> %m, i32 %evl)
82  ret <1 x i16> %splat
83}
84
; <2 x i16>: same mf4 container as the 1-element e16 case, AVL=%evl (a1).
85define <2 x i16> @vp_splat_v2i16(i16 %val, <2 x i1> %m, i32 zeroext %evl) {
86; CHECK-LABEL: vp_splat_v2i16:
87; CHECK:       # %bb.0:
88; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
89; CHECK-NEXT:    vmv.v.x v8, a0
90; CHECK-NEXT:    ret
91  %splat = call <2 x i16> @llvm.experimental.vp.splat.v2i16(i16 %val, <2 x i1> %m, i32 %evl)
92  ret <2 x i16> %splat
93}
94
; <4 x i16>: e16/mf2, AVL=%evl (a1).
95define <4 x i16> @vp_splat_v4i16(i16 %val, <4 x i1> %m, i32 zeroext %evl) {
96; CHECK-LABEL: vp_splat_v4i16:
97; CHECK:       # %bb.0:
98; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
99; CHECK-NEXT:    vmv.v.x v8, a0
100; CHECK-NEXT:    ret
101  %splat = call <4 x i16> @llvm.experimental.vp.splat.v4i16(i16 %val, <4 x i1> %m, i32 %evl)
102  ret <4 x i16> %splat
103}
104
; <8 x i16>: e16/m1, AVL=%evl (a1).
105define <8 x i16> @vp_splat_v8i16(i16 %val, <8 x i1> %m, i32 zeroext %evl) {
106; CHECK-LABEL: vp_splat_v8i16:
107; CHECK:       # %bb.0:
108; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
109; CHECK-NEXT:    vmv.v.x v8, a0
110; CHECK-NEXT:    ret
111  %splat = call <8 x i16> @llvm.experimental.vp.splat.v8i16(i16 %val, <8 x i1> %m, i32 %evl)
112  ret <8 x i16> %splat
113}
114
; <16 x i16>: e16/m2, AVL=%evl (a1).
115define <16 x i16> @vp_splat_v16i16(i16 %val, <16 x i1> %m, i32 zeroext %evl) {
116; CHECK-LABEL: vp_splat_v16i16:
117; CHECK:       # %bb.0:
118; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
119; CHECK-NEXT:    vmv.v.x v8, a0
120; CHECK-NEXT:    ret
121  %splat = call <16 x i16> @llvm.experimental.vp.splat.v16i16(i16 %val, <16 x i1> %m, i32 %evl)
122  ret <16 x i16> %splat
123}
124
; <32 x i16>: e16/m4, AVL=%evl (a1).
125define <32 x i16> @vp_splat_v32i16(i16 %val, <32 x i1> %m, i32 zeroext %evl) {
126; CHECK-LABEL: vp_splat_v32i16:
127; CHECK:       # %bb.0:
128; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
129; CHECK-NEXT:    vmv.v.x v8, a0
130; CHECK-NEXT:    ret
131  %splat = call <32 x i16> @llvm.experimental.vp.splat.v32i16(i16 %val, <32 x i1> %m, i32 %evl)
132  ret <32 x i16> %splat
133}
134
; <1 x i32>: e32/mf2, AVL=%evl (a1).
135define <1 x i32> @vp_splat_v1i32(i32 %val, <1 x i1> %m, i32 zeroext %evl) {
136; CHECK-LABEL: vp_splat_v1i32:
137; CHECK:       # %bb.0:
138; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
139; CHECK-NEXT:    vmv.v.x v8, a0
140; CHECK-NEXT:    ret
141  %splat = call <1 x i32> @llvm.experimental.vp.splat.v1i32(i32 %val, <1 x i1> %m, i32 %evl)
142  ret <1 x i32> %splat
143}
144
; <2 x i32>: same mf2 container as the 1-element e32 case, AVL=%evl (a1).
145define <2 x i32> @vp_splat_v2i32(i32 %val, <2 x i1> %m, i32 zeroext %evl) {
146; CHECK-LABEL: vp_splat_v2i32:
147; CHECK:       # %bb.0:
148; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
149; CHECK-NEXT:    vmv.v.x v8, a0
150; CHECK-NEXT:    ret
151  %splat = call <2 x i32> @llvm.experimental.vp.splat.v2i32(i32 %val, <2 x i1> %m, i32 %evl)
152  ret <2 x i32> %splat
153}
154
; <4 x i32>: e32/m1, AVL=%evl (a1).
155define <4 x i32> @vp_splat_v4i32(i32 %val, <4 x i1> %m, i32 zeroext %evl) {
156; CHECK-LABEL: vp_splat_v4i32:
157; CHECK:       # %bb.0:
158; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
159; CHECK-NEXT:    vmv.v.x v8, a0
160; CHECK-NEXT:    ret
161  %splat = call <4 x i32> @llvm.experimental.vp.splat.v4i32(i32 %val, <4 x i1> %m, i32 %evl)
162  ret <4 x i32> %splat
163}
164
; <8 x i32>: e32/m2, AVL=%evl (a1).
165define <8 x i32> @vp_splat_v8i32(i32 %val, <8 x i1> %m, i32 zeroext %evl) {
166; CHECK-LABEL: vp_splat_v8i32:
167; CHECK:       # %bb.0:
168; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
169; CHECK-NEXT:    vmv.v.x v8, a0
170; CHECK-NEXT:    ret
171  %splat = call <8 x i32> @llvm.experimental.vp.splat.v8i32(i32 %val, <8 x i1> %m, i32 %evl)
172  ret <8 x i32> %splat
173}
174
; <16 x i32>: e32/m4, AVL=%evl (a1).
175define <16 x i32> @vp_splat_v16i32(i32 %val, <16 x i1> %m, i32 zeroext %evl) {
176; CHECK-LABEL: vp_splat_v16i32:
177; CHECK:       # %bb.0:
178; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
179; CHECK-NEXT:    vmv.v.x v8, a0
180; CHECK-NEXT:    ret
181  %splat = call <16 x i32> @llvm.experimental.vp.splat.v16i32(i32 %val, <16 x i1> %m, i32 %evl)
182  ret <16 x i32> %splat
183}
184
; i64 splat — the two targets diverge, hence RV32/RV64 prefixes:
; * RV32: no 64-bit GPR, so %val arrives split in a0/a1, is stored to a stack
;   slot, and is splatted with a zero-stride vlse64. Note the expected code
;   uses the fixed element count (vsetivli zero, 1), not %evl (a2) —
;   NOTE(review): looks like a missed use of the VP EVL on this path; confirm
;   whether that is intentional before editing these checks.
; * RV64: plain vmv.v.x with AVL=%evl (a1) at e64/m1.
185define <1 x i64> @vp_splat_v1i64(i64 %val, <1 x i1> %m, i32 zeroext %evl) {
186; RV32-LABEL: vp_splat_v1i64:
187; RV32:       # %bb.0:
188; RV32-NEXT:    addi sp, sp, -16
189; RV32-NEXT:    .cfi_def_cfa_offset 16
190; RV32-NEXT:    sw a0, 8(sp)
191; RV32-NEXT:    sw a1, 12(sp)
192; RV32-NEXT:    addi a0, sp, 8
193; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
194; RV32-NEXT:    vlse64.v v8, (a0), zero
195; RV32-NEXT:    addi sp, sp, 16
196; RV32-NEXT:    .cfi_def_cfa_offset 0
197; RV32-NEXT:    ret
198;
199; RV64-LABEL: vp_splat_v1i64:
200; RV64:       # %bb.0:
201; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
202; RV64-NEXT:    vmv.v.x v8, a0
203; RV64-NEXT:    ret
204  %splat = call <1 x i64> @llvm.experimental.vp.splat.v1i64(i64 %val, <1 x i1> %m, i32 %evl)
205  ret <1 x i64> %splat
206}
207
; <2 x i64>: RV32 goes through the stack + zero-stride vlse64 at fixed VL 2
; (%evl in a2 is unused in the checked output); RV64 uses vmv.v.x with
; AVL=%evl (a1) at e64/m1.
208define <2 x i64> @vp_splat_v2i64(i64 %val, <2 x i1> %m, i32 zeroext %evl) {
209; RV32-LABEL: vp_splat_v2i64:
210; RV32:       # %bb.0:
211; RV32-NEXT:    addi sp, sp, -16
212; RV32-NEXT:    .cfi_def_cfa_offset 16
213; RV32-NEXT:    sw a0, 8(sp)
214; RV32-NEXT:    sw a1, 12(sp)
215; RV32-NEXT:    addi a0, sp, 8
216; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
217; RV32-NEXT:    vlse64.v v8, (a0), zero
218; RV32-NEXT:    addi sp, sp, 16
219; RV32-NEXT:    .cfi_def_cfa_offset 0
220; RV32-NEXT:    ret
221;
222; RV64-LABEL: vp_splat_v2i64:
223; RV64:       # %bb.0:
224; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
225; RV64-NEXT:    vmv.v.x v8, a0
226; RV64-NEXT:    ret
227  %splat = call <2 x i64> @llvm.experimental.vp.splat.v2i64(i64 %val, <2 x i1> %m, i32 %evl)
228  ret <2 x i64> %splat
229}
230
; <4 x i64>: as above but m2. RV32: stack + vlse64 at fixed VL 4; RV64:
; vmv.v.x with AVL=%evl (a1) at e64/m2.
231define <4 x i64> @vp_splat_v4i64(i64 %val, <4 x i1> %m, i32 zeroext %evl) {
232; RV32-LABEL: vp_splat_v4i64:
233; RV32:       # %bb.0:
234; RV32-NEXT:    addi sp, sp, -16
235; RV32-NEXT:    .cfi_def_cfa_offset 16
236; RV32-NEXT:    sw a0, 8(sp)
237; RV32-NEXT:    sw a1, 12(sp)
238; RV32-NEXT:    addi a0, sp, 8
239; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
240; RV32-NEXT:    vlse64.v v8, (a0), zero
241; RV32-NEXT:    addi sp, sp, 16
242; RV32-NEXT:    .cfi_def_cfa_offset 0
243; RV32-NEXT:    ret
244;
245; RV64-LABEL: vp_splat_v4i64:
246; RV64:       # %bb.0:
247; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
248; RV64-NEXT:    vmv.v.x v8, a0
249; RV64-NEXT:    ret
250  %splat = call <4 x i64> @llvm.experimental.vp.splat.v4i64(i64 %val, <4 x i1> %m, i32 %evl)
251  ret <4 x i64> %splat
252}
253
; <8 x i64>: as above but m4. RV32: stack + vlse64 at fixed VL 8; RV64:
; vmv.v.x with AVL=%evl (a1) at e64/m4.
254define <8 x i64> @vp_splat_v8i64(i64 %val, <8 x i1> %m, i32 zeroext %evl) {
255; RV32-LABEL: vp_splat_v8i64:
256; RV32:       # %bb.0:
257; RV32-NEXT:    addi sp, sp, -16
258; RV32-NEXT:    .cfi_def_cfa_offset 16
259; RV32-NEXT:    sw a0, 8(sp)
260; RV32-NEXT:    sw a1, 12(sp)
261; RV32-NEXT:    addi a0, sp, 8
262; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
263; RV32-NEXT:    vlse64.v v8, (a0), zero
264; RV32-NEXT:    addi sp, sp, 16
265; RV32-NEXT:    .cfi_def_cfa_offset 0
266; RV32-NEXT:    ret
267;
268; RV64-LABEL: vp_splat_v8i64:
269; RV64:       # %bb.0:
270; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
271; RV64-NEXT:    vmv.v.x v8, a0
272; RV64-NEXT:    ret
273  %splat = call <8 x i64> @llvm.experimental.vp.splat.v8i64(i64 %val, <8 x i1> %m, i32 %evl)
274  ret <8 x i64> %splat
275}
276
; FP splats (requires +zvfh for f16): %val is passed in fa0, so %evl is the
; first GPR argument (a0). Expect vfmv.v.f with AVL=%evl at e16/mf4.
277define <1 x half> @vp_splat_v1f16(half %val, <1 x i1> %m, i32 zeroext %evl) {
278; CHECK-LABEL: vp_splat_v1f16:
279; CHECK:       # %bb.0:
280; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
281; CHECK-NEXT:    vfmv.v.f v8, fa0
282; CHECK-NEXT:    ret
283  %splat = call <1 x half> @llvm.experimental.vp.splat.v1f16(half %val, <1 x i1> %m, i32 %evl)
284  ret <1 x half> %splat
285}
286
; <2 x half>: vfmv.v.f from fa0, AVL=%evl (a0), e16/mf4.
287define <2 x half> @vp_splat_v2f16(half %val, <2 x i1> %m, i32 zeroext %evl) {
288; CHECK-LABEL: vp_splat_v2f16:
289; CHECK:       # %bb.0:
290; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
291; CHECK-NEXT:    vfmv.v.f v8, fa0
292; CHECK-NEXT:    ret
293  %splat = call <2 x half> @llvm.experimental.vp.splat.v2f16(half %val, <2 x i1> %m, i32 %evl)
294  ret <2 x half> %splat
295}
296
; <4 x half>: vfmv.v.f from fa0, AVL=%evl (a0), e16/mf2.
297define <4 x half> @vp_splat_v4f16(half %val, <4 x i1> %m, i32 zeroext %evl) {
298; CHECK-LABEL: vp_splat_v4f16:
299; CHECK:       # %bb.0:
300; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
301; CHECK-NEXT:    vfmv.v.f v8, fa0
302; CHECK-NEXT:    ret
303  %splat = call <4 x half> @llvm.experimental.vp.splat.v4f16(half %val, <4 x i1> %m, i32 %evl)
304  ret <4 x half> %splat
305}
306
; <8 x half>: vfmv.v.f from fa0, AVL=%evl (a0), e16/m1.
307define <8 x half> @vp_splat_v8f16(half %val, <8 x i1> %m, i32 zeroext %evl) {
308; CHECK-LABEL: vp_splat_v8f16:
309; CHECK:       # %bb.0:
310; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
311; CHECK-NEXT:    vfmv.v.f v8, fa0
312; CHECK-NEXT:    ret
313  %splat = call <8 x half> @llvm.experimental.vp.splat.v8f16(half %val, <8 x i1> %m, i32 %evl)
314  ret <8 x half> %splat
315}
316
; <16 x half>: vfmv.v.f from fa0, AVL=%evl (a0), e16/m2.
317define <16 x half> @vp_splat_v16f16(half %val, <16 x i1> %m, i32 zeroext %evl) {
318; CHECK-LABEL: vp_splat_v16f16:
319; CHECK:       # %bb.0:
320; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
321; CHECK-NEXT:    vfmv.v.f v8, fa0
322; CHECK-NEXT:    ret
323  %splat = call <16 x half> @llvm.experimental.vp.splat.v16f16(half %val, <16 x i1> %m, i32 %evl)
324  ret <16 x half> %splat
325}
326
; <32 x half>: vfmv.v.f from fa0, AVL=%evl (a0), e16/m4.
327define <32 x half> @vp_splat_v32f16(half %val, <32 x i1> %m, i32 zeroext %evl) {
328; CHECK-LABEL: vp_splat_v32f16:
329; CHECK:       # %bb.0:
330; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
331; CHECK-NEXT:    vfmv.v.f v8, fa0
332; CHECK-NEXT:    ret
333  %splat = call <32 x half> @llvm.experimental.vp.splat.v32f16(half %val, <32 x i1> %m, i32 %evl)
334  ret <32 x half> %splat
335}
336
; <1 x float>: vfmv.v.f from fa0, AVL=%evl (a0), e32/mf2.
337define <1 x float> @vp_splat_v1f32(float %val, <1 x i1> %m, i32 zeroext %evl) {
338; CHECK-LABEL: vp_splat_v1f32:
339; CHECK:       # %bb.0:
340; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
341; CHECK-NEXT:    vfmv.v.f v8, fa0
342; CHECK-NEXT:    ret
343  %splat = call <1 x float> @llvm.experimental.vp.splat.v1f32(float %val, <1 x i1> %m, i32 %evl)
344  ret <1 x float> %splat
345}
346
; <2 x float>: vfmv.v.f from fa0, AVL=%evl (a0), e32/mf2.
347define <2 x float> @vp_splat_v2f32(float %val, <2 x i1> %m, i32 zeroext %evl) {
348; CHECK-LABEL: vp_splat_v2f32:
349; CHECK:       # %bb.0:
350; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
351; CHECK-NEXT:    vfmv.v.f v8, fa0
352; CHECK-NEXT:    ret
353  %splat = call <2 x float> @llvm.experimental.vp.splat.v2f32(float %val, <2 x i1> %m, i32 %evl)
354  ret <2 x float> %splat
355}
356
; <4 x float>: vfmv.v.f from fa0, AVL=%evl (a0), e32/m1.
357define <4 x float> @vp_splat_v4f32(float %val, <4 x i1> %m, i32 zeroext %evl) {
358; CHECK-LABEL: vp_splat_v4f32:
359; CHECK:       # %bb.0:
360; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
361; CHECK-NEXT:    vfmv.v.f v8, fa0
362; CHECK-NEXT:    ret
363  %splat = call <4 x float> @llvm.experimental.vp.splat.v4f32(float %val, <4 x i1> %m, i32 %evl)
364  ret <4 x float> %splat
365}
366
; <8 x float>: vfmv.v.f from fa0, AVL=%evl (a0), e32/m2.
367define <8 x float> @vp_splat_v8f32(float %val, <8 x i1> %m, i32 zeroext %evl) {
368; CHECK-LABEL: vp_splat_v8f32:
369; CHECK:       # %bb.0:
370; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
371; CHECK-NEXT:    vfmv.v.f v8, fa0
372; CHECK-NEXT:    ret
373  %splat = call <8 x float> @llvm.experimental.vp.splat.v8f32(float %val, <8 x i1> %m, i32 %evl)
374  ret <8 x float> %splat
375}
376
; <16 x float>: vfmv.v.f from fa0, AVL=%evl (a0), e32/m4.
377define <16 x float> @vp_splat_v16f32(float %val, <16 x i1> %m, i32 zeroext %evl) {
378; CHECK-LABEL: vp_splat_v16f32:
379; CHECK:       # %bb.0:
380; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
381; CHECK-NEXT:    vfmv.v.f v8, fa0
382; CHECK-NEXT:    ret
383  %splat = call <16 x float> @llvm.experimental.vp.splat.v16f32(float %val, <16 x i1> %m, i32 %evl)
384  ret <16 x float> %splat
385}
386
; <1 x double>: unlike i64, double is in fa0 on both RV32 and RV64 (thanks to
; +d), so a single CHECK prefix suffices: vfmv.v.f, AVL=%evl (a0), e64/m1.
387define <1 x double> @vp_splat_v1f64(double %val, <1 x i1> %m, i32 zeroext %evl) {
388; CHECK-LABEL: vp_splat_v1f64:
389; CHECK:       # %bb.0:
390; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
391; CHECK-NEXT:    vfmv.v.f v8, fa0
392; CHECK-NEXT:    ret
393  %splat = call <1 x double> @llvm.experimental.vp.splat.v1f64(double %val, <1 x i1> %m, i32 %evl)
394  ret <1 x double> %splat
395}
396
; <2 x double>: vfmv.v.f from fa0, AVL=%evl (a0), e64/m1.
397define <2 x double> @vp_splat_v2f64(double %val, <2 x i1> %m, i32 zeroext %evl) {
398; CHECK-LABEL: vp_splat_v2f64:
399; CHECK:       # %bb.0:
400; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
401; CHECK-NEXT:    vfmv.v.f v8, fa0
402; CHECK-NEXT:    ret
403  %splat = call <2 x double> @llvm.experimental.vp.splat.v2f64(double %val, <2 x i1> %m, i32 %evl)
404  ret <2 x double> %splat
405}
406
; <4 x double>: vfmv.v.f from fa0, AVL=%evl (a0), e64/m2.
407define <4 x double> @vp_splat_v4f64(double %val, <4 x i1> %m, i32 zeroext %evl) {
408; CHECK-LABEL: vp_splat_v4f64:
409; CHECK:       # %bb.0:
410; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
411; CHECK-NEXT:    vfmv.v.f v8, fa0
412; CHECK-NEXT:    ret
413  %splat = call <4 x double> @llvm.experimental.vp.splat.v4f64(double %val, <4 x i1> %m, i32 %evl)
414  ret <4 x double> %splat
415}
416
; <8 x double>: vfmv.v.f from fa0, AVL=%evl (a0), e64/m4.
417define <8 x double> @vp_splat_v8f64(double %val, <8 x i1> %m, i32 zeroext %evl) {
418; CHECK-LABEL: vp_splat_v8f64:
419; CHECK:       # %bb.0:
420; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
421; CHECK-NEXT:    vfmv.v.f v8, fa0
422; CHECK-NEXT:    ret
423  %splat = call <8 x double> @llvm.experimental.vp.splat.v8f64(double %val, <8 x i1> %m, i32 %evl)
424  ret <8 x double> %splat
425}
426
; Illegal element type: i31 is not a legal SEW, so the checked output uses
; e32 and matches the <16 x i32> lowering (m4) — presumably via promotion of
; the element type to i32; confirm against type legalization if these checks
; ever need regenerating by hand.
427define <16 x i31> @vp_splat_v16i31(i31 %val, <16 x i1> %m, i32 zeroext %evl) {
428; CHECK-LABEL: vp_splat_v16i31:
429; CHECK:       # %bb.0:
430; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
431; CHECK-NEXT:    vmv.v.x v8, a0
432; CHECK-NEXT:    ret
433  %splat = call <16 x i31> @llvm.experimental.vp.splat.v16i31(i31 %val, <16 x i1> %m, i32 %evl)
434  ret <16 x i31> %splat
435}
436
; Non-power-of-two element count: <15 x i32> lands in the same e32/m4
; container as <16 x i32> — presumably widened to the next power of two;
; AVL is still %evl (a1).
437define <15 x i32> @vp_splat_v15i32(i32 %val, <15 x i1> %m, i32 zeroext %evl) {
438; CHECK-LABEL: vp_splat_v15i32:
439; CHECK:       # %bb.0:
440; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
441; CHECK-NEXT:    vmv.v.x v8, a0
442; CHECK-NEXT:    ret
443  %splat = call <15 x i32> @llvm.experimental.vp.splat.v15i32(i32 %val, <15 x i1> %m, i32 %evl)
444  ret <15 x i32> %splat
445}
446
447; Split case.
; <32 x i32> is 1024 bits — exactly m8 at the minimum VLEN of 128 implied by
; +v — so the checked output is still a single vmv.v.x with AVL=%evl (a1)
; rather than two split m4 operations.
448define <32 x i32> @vp_splat_v32i32(i32 %val, <32 x i1> %m, i32 zeroext %evl) {
449; CHECK-LABEL: vp_splat_v32i32:
450; CHECK:       # %bb.0:
451; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
452; CHECK-NEXT:    vmv.v.x v8, a0
453; CHECK-NEXT:    ret
454  %splat = call <32 x i32> @llvm.experimental.vp.splat.v32i32(i32 %val, <32 x i1> %m, i32 %evl)
455  ret <32 x i32> %splat
456}
457